From fa4d4a15ff002a8b4eb1f734b590ac2929936f1b Mon Sep 17 00:00:00 2001
From: Michal Fojtik
Date: Mon, 19 Feb 2018 11:10:08 +0100
Subject: [PATCH 1/4] bump(*): update etcd to 3.2.16 and grpc to 1.7.5

---
 Godeps/Godeps.json | 288 +-- glide.lock | 39 +- glide.yaml | 6 +- vendor/github.com/boltdb/bolt/Makefile | 18 - .../boltdb/bolt/cmd/bolt/main_test.go | 185 -- .../github.com/boltdb/bolt/freelist_test.go | 158 -- .../{boltdb/bolt => coreos/bbolt}/.gitignore | 1 + .../{boltdb/bolt => coreos/bbolt}/LICENSE | 0 vendor/github.com/coreos/bbolt/Makefile | 30 + .../{boltdb/bolt => coreos/bbolt}/README.md | 100 +- .../bolt => coreos/bbolt}/appveyor.yml | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_386.go | 3 + .../bolt => coreos/bbolt}/bolt_amd64.go | 3 + vendor/github.com/coreos/bbolt/bolt_arm.go | 28 + .../bolt => coreos/bbolt}/bolt_arm64.go | 3 + .../bolt => coreos/bbolt}/bolt_linux.go | 0 .../github.com/coreos/bbolt/bolt_mips64x.go | 12 + .../bbolt/bolt_mipsx.go} | 7 +- .../bolt => coreos/bbolt}/bolt_openbsd.go | 0 .../{boltdb/bolt => coreos/bbolt}/bolt_ppc.go | 0 .../bolt => coreos/bbolt}/bolt_ppc64.go | 3 + .../bolt => coreos/bbolt}/bolt_ppc64le.go | 3 + .../bolt => coreos/bbolt}/bolt_s390x.go | 3 + .../bolt => coreos/bbolt}/bolt_unix.go | 33 +- .../bbolt}/bolt_unix_solaris.go | 39 +- .../bolt => coreos/bbolt}/bolt_windows.go | 33 +- .../bolt => coreos/bbolt}/boltsync_unix.go | 0 .../{boltdb/bolt => coreos/bbolt}/bucket.go | 49 +- .../bolt => coreos/bbolt}/bucket_test.go | 106 +- .../bolt => coreos/bbolt}/cmd/bolt/main.go | 668 ++++- .../coreos/bbolt/cmd/bolt/main_test.go | 456 ++++ .../{boltdb/bolt => coreos/bbolt}/cursor.go | 0 .../bolt => coreos/bbolt}/cursor_test.go | 2 +- .../{boltdb/bolt => coreos/bbolt}/db.go | 207 +- .../{boltdb/bolt => coreos/bbolt}/db_test.go | 465 ++-- .../{boltdb/bolt => coreos/bbolt}/doc.go | 0 .../{boltdb/bolt => coreos/bbolt}/errors.go | 0 .../{boltdb/bolt => coreos/bbolt}/freelist.go | 167 +- .../github.com/coreos/bbolt/freelist_test.go | 288 +++ .../{boltdb/bolt => coreos/bbolt}/node.go | 2 +- .../bolt => coreos/bbolt}/node_test.go | 0 .../{boltdb/bolt => coreos/bbolt}/page.go | 31 +- .../bolt => coreos/bbolt}/page_test.go | 0 .../bolt => coreos/bbolt}/quick_test.go | 10 +- .../bbolt/simulation_no_freelist_sync_test.go | 47 + .../bolt => coreos/bbolt}/simulation_test.go | 153 +- .../{boltdb/bolt => coreos/bbolt}/tx.go | 81 +- .../{boltdb/bolt => coreos/bbolt}/tx_test.go | 155 +- vendor/github.com/coreos/etcd/.gitignore | 7 + vendor/github.com/coreos/etcd/.semaphore.sh | 16 + vendor/github.com/coreos/etcd/.travis.yml | 73 +- vendor/github.com/coreos/etcd/Dockerfile-test | 57 + .../Documentation/learning/auth_design.md | 2 +- .../etcd/Documentation/op-guide/monitoring.md | 5 - vendor/github.com/coreos/etcd/auth/store.go | 2 +- .../github.com/coreos/etcd/auth/store_test.go | 11 +- .../coreos/etcd/bill-of-materials.json | 21 +- .../github.com/coreos/etcd/clientv3/README.md | 2 +- .../github.com/coreos/etcd/clientv3/auth.go | 57 +- .../coreos/etcd/clientv3/balancer.go | 356 --- .../coreos/etcd/clientv3/balancer_test.go | 239 -- .../github.com/coreos/etcd/clientv3/client.go | 105 +- .../coreos/etcd/clientv3/client_test.go | 10 +- .../clientv3/clientv3util/example_key_test.go | 5 +- .../coreos/etcd/clientv3/cluster.go | 59 +- .../coreos/etcd/clientv3/compact_op.go | 6 +- .../coreos/etcd/clientv3/compare.go | 10 + .../etcd/clientv3/concurrency/election.go | 9 +- .../coreos/etcd/clientv3/concurrency/key.go | 1 + .../coreos/etcd/clientv3/concurrency/mutex.go | 11 +- 
.../etcd/clientv3/concurrency/session.go | 2 + .../coreos/etcd/clientv3/concurrency/stm.go | 5 +- .../github.com/coreos/etcd/clientv3/config.go | 23 +- vendor/github.com/coreos/etcd/clientv3/doc.go | 2 +- .../coreos/etcd/clientv3/example_auth_test.go | 113 - .../etcd/clientv3/example_cluster_test.go | 1 + .../coreos/etcd/clientv3/example_kv_test.go | 8 +- .../etcd/clientv3/example_lease_test.go | 1 + .../etcd/clientv3/example_maintenence_test.go | 18 +- .../etcd/clientv3/example_metrics_test.go | 9 +- .../coreos/etcd/clientv3/example_test.go | 7 +- .../etcd/clientv3/example_watch_test.go | 1 + .../coreos/etcd/clientv3/grpc_options.go | 46 + .../coreos/etcd/clientv3/health_balancer.go | 627 +++++ .../clientv3/integration/black_hole_test.go | 211 ++ .../etcd/clientv3/integration/cluster_test.go | 34 + .../etcd/clientv3/integration/dial_test.go | 43 +- .../etcd/clientv3/integration/kv_test.go | 177 +- .../etcd/clientv3/integration/lease_test.go | 36 +- .../etcd/clientv3/integration/logger_test.go | 12 +- .../etcd/clientv3/integration/metrics_test.go | 4 +- .../etcd/clientv3/integration/mirror_test.go | 1 + .../clientv3/integration/namespace_test.go | 3 +- .../integration/network_partition_test.go | 260 ++ .../etcd/clientv3/integration/role_test.go | 1 + .../integration/server_shutdown_test.go | 352 +++ .../etcd/clientv3/integration/txn_test.go | 4 +- .../etcd/clientv3/integration/user_test.go | 3 +- .../coreos/etcd/clientv3/integration/util.go | 35 + .../etcd/clientv3/integration/watch_test.go | 50 +- vendor/github.com/coreos/etcd/clientv3/kv.go | 72 +- .../github.com/coreos/etcd/clientv3/lease.go | 110 +- .../github.com/coreos/etcd/clientv3/logger.go | 34 +- .../coreos/etcd/clientv3/main_test.go | 10 +- .../coreos/etcd/clientv3/maintenance.go | 47 +- .../coreos/etcd/clientv3/mirror/syncer.go | 1 + .../coreos/etcd/clientv3/namespace/kv.go | 4 +- .../coreos/etcd/clientv3/namespace/lease.go | 4 +- .../coreos/etcd/clientv3/namespace/watch.go | 4 +- .../coreos/etcd/clientv3/naming/grpc.go | 11 +- .../coreos/etcd/clientv3/naming/grpc_test.go | 13 +- vendor/github.com/coreos/etcd/clientv3/op.go | 90 +- .../coreos/etcd/clientv3/ready_wait.go | 30 + .../github.com/coreos/etcd/clientv3/retry.go | 376 ++- vendor/github.com/coreos/etcd/clientv3/txn.go | 29 +- .../github.com/coreos/etcd/clientv3/watch.go | 75 +- .../coreos/etcd/clientv3/yaml/config.go | 13 +- .../coreos/etcd/clientv3/yaml/config_test.go | 10 +- .../coreos/etcd/compactor/compactor.go | 2 +- .../coreos/etcd/e2e/docker/Dockerfile | 35 +- vendor/github.com/coreos/etcd/embed/config.go | 67 +- vendor/github.com/coreos/etcd/embed/etcd.go | 187 +- vendor/github.com/coreos/etcd/embed/serve.go | 37 +- .../etcd/etcdctl/ctlv3/command/global.go | 6 +- .../etcdctl/ctlv3/command/snapshot_command.go | 2 +- .../github.com/coreos/etcd/etcdmain/config.go | 4 + .../github.com/coreos/etcd/etcdmain/etcd.go | 19 +- .../github.com/coreos/etcd/etcdmain/help.go | 8 + .../etcd/etcdserver/api/v3client/v3client.go | 10 +- .../v3electionpb/gw/v3election.pb.gw.go | 70 +- .../v3election/v3electionpb/v3election.pb.go | 65 + .../api/v3lock/v3lockpb/gw/v3lock.pb.gw.go | 37 +- .../api/v3lock/v3lockpb/v3lock.pb.go | 30 + .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 30 +- .../etcd/etcdserver/api/v3rpc/interceptor.go | 4 +- .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 10 + .../etcdserver/api/v3rpc/rpctypes/error.go | 8 + .../coreos/etcd/etcdserver/api/v3rpc/util.go | 36 + .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 20 + .../coreos/etcd/etcdserver/config.go | 5 + 
.../etcdserver/etcdserverpb/gw/rpc.pb.gw.go | 446 ++-- .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 780 +++++- .../coreos/etcd/etcdserver/server.go | 7 +- .../coreos/etcd/etcdserver/v3_server.go | 8 +- vendor/github.com/coreos/etcd/glide.lock | 42 +- vendor/github.com/coreos/etcd/glide.yaml | 35 +- .../coreos/etcd/hack/patch/README.md | 37 + .../coreos/etcd/hack/patch/cherrypick.sh | 229 ++ .../coreos/etcd/hack/scripts-dev/Makefile | 294 +++ .../coreos/etcd/hack/scripts-dev/README | 2 + .../scripts-dev/docker-dns-srv/Dockerfile | 44 + .../docker-dns-srv/certs-gateway/Procfile | 7 + .../docker-dns-srv/certs-gateway/ca-csr.json | 19 + .../docker-dns-srv/certs-gateway/ca.crt | 22 + .../docker-dns-srv/certs-gateway/gencert.json | 13 + .../docker-dns-srv/certs-gateway/gencerts.sh | 26 + .../docker-dns-srv/certs-gateway/run.sh | 47 + .../certs-gateway/server-ca-csr.json | 23 + .../docker-dns-srv/certs-gateway/server.crt | 25 + .../certs-gateway/server.key.insecure | 27 + .../docker-dns-srv/certs-wildcard/Procfile | 5 + .../docker-dns-srv/certs-wildcard/ca-csr.json | 19 + .../docker-dns-srv/certs-wildcard/ca.crt | 22 + .../certs-wildcard/gencert.json | 13 + .../docker-dns-srv/certs-wildcard/gencerts.sh | 26 + .../docker-dns-srv/certs-wildcard/run.sh | 33 + .../certs-wildcard/server-ca-csr.json | 21 + .../docker-dns-srv/certs-wildcard/server.crt | 24 + .../certs-wildcard/server.key.insecure | 27 + .../scripts-dev/docker-dns-srv/certs/Procfile | 5 + .../docker-dns-srv/certs/ca-csr.json | 19 + .../scripts-dev/docker-dns-srv/certs/ca.crt | 22 + .../docker-dns-srv/certs/gencert.json | 13 + .../docker-dns-srv/certs/gencerts.sh | 26 + .../scripts-dev/docker-dns-srv/certs/run.sh | 33 + .../docker-dns-srv/certs/server-ca-csr.json | 23 + .../docker-dns-srv/certs/server.crt | 25 + .../docker-dns-srv/certs/server.key.insecure | 27 + .../hack/scripts-dev/docker-dns-srv/etcd.zone | 21 + .../scripts-dev/docker-dns-srv/named.conf | 23 + .../hack/scripts-dev/docker-dns-srv/rdns.zone | 13 + .../scripts-dev/docker-dns-srv/resolv.conf | 1 + .../hack/scripts-dev/docker-dns/Dockerfile | 44 + .../docker-dns/certs-common-name/Procfile | 6 + .../docker-dns/certs-common-name/ca-csr.json | 19 + .../docker-dns/certs-common-name/ca.crt | 22 + .../docker-dns/certs-common-name/gencert.json | 13 + .../docker-dns/certs-common-name/gencerts.sh | 26 + .../docker-dns/certs-common-name/run.sh | 255 ++ .../certs-common-name/server-ca-csr.json | 23 + .../docker-dns/certs-common-name/server.crt | 25 + .../certs-common-name/server.key.insecure | 27 + .../docker-dns/certs-gateway/Procfile | 8 + .../docker-dns/certs-gateway/ca-csr.json | 19 + .../docker-dns/certs-gateway/ca.crt | 22 + .../docker-dns/certs-gateway/gencert.json | 13 + .../docker-dns/certs-gateway/gencerts.sh | 26 + .../docker-dns/certs-gateway/run.sh | 47 + .../certs-gateway/server-ca-csr.json | 22 + .../docker-dns/certs-gateway/server.crt | 25 + .../certs-gateway/server.key.insecure | 27 + .../docker-dns/certs-wildcard/Procfile | 6 + .../docker-dns/certs-wildcard/ca-csr.json | 19 + .../docker-dns/certs-wildcard/ca.crt | 22 + .../docker-dns/certs-wildcard/gencert.json | 13 + .../docker-dns/certs-wildcard/gencerts.sh | 26 + .../docker-dns/certs-wildcard/run.sh | 33 + .../certs-wildcard/server-ca-csr.json | 20 + .../docker-dns/certs-wildcard/server.crt | 24 + .../certs-wildcard/server.key.insecure | 27 + .../scripts-dev/docker-dns/certs/Procfile | 6 + .../scripts-dev/docker-dns/certs/ca-csr.json | 19 + .../hack/scripts-dev/docker-dns/certs/ca.crt | 22 + 
.../scripts-dev/docker-dns/certs/gencert.json | 13 + .../scripts-dev/docker-dns/certs/gencerts.sh | 26 + .../hack/scripts-dev/docker-dns/certs/run.sh | 33 + .../docker-dns/certs/server-ca-csr.json | 22 + .../scripts-dev/docker-dns/certs/server.crt | 25 + .../docker-dns/certs/server.key.insecure | 27 + .../hack/scripts-dev/docker-dns/etcd.zone | 14 + .../hack/scripts-dev/docker-dns/named.conf | 23 + .../hack/scripts-dev/docker-dns/rdns.zone | 13 + .../hack/scripts-dev/docker-dns/resolv.conf | 1 + .../coreos/etcd/integration/bridge.go | 67 +- .../coreos/etcd/integration/cluster.go | 120 +- .../coreos/etcd/integration/cluster_proxy.go | 6 +- .../coreos/etcd/integration/embed_test.go | 34 +- .../integration/network_partition_test.go | 4 +- .../coreos/etcd/integration/v3_auth_test.go | 52 + .../coreos/etcd/integration/v3_grpc_test.go | 49 +- .../coreos/etcd/integration/v3_lease_test.go | 4 +- .../coreos/etcd/mvcc/backend/backend.go | 4 +- .../coreos/etcd/mvcc/backend/backend_test.go | 2 +- .../coreos/etcd/mvcc/backend/batch_tx.go | 2 +- .../coreos/etcd/mvcc/backend/batch_tx_test.go | 2 +- .../etcd/mvcc/backend/config_default.go | 2 +- .../coreos/etcd/mvcc/backend/config_linux.go | 2 +- .../etcd/mvcc/backend/config_windows.go | 2 +- .../coreos/etcd/mvcc/backend/read_tx.go | 2 +- .../coreos/etcd/mvcc/watchable_store.go | 2 +- .../coreos/etcd/mvcc/watchable_store_test.go | 65 +- .../etcd/proxy/grpcproxy/cluster_test.go | 13 +- .../coreos/etcd/proxy/grpcproxy/lease.go | 2 +- .../coreos/etcd/proxy/grpcproxy/watch.go | 2 +- .../coreos/etcd/raft/raftpb/raft.pb.go | 70 +- .../coreos/etcd/scripts/build-docker | 26 +- .../coreos/etcd/scripts/genproto.sh | 8 +- vendor/github.com/coreos/etcd/test | 147 +- .../coreos/etcd/tools/benchmark/cmd/util.go | 5 +- .../coreos/etcd/tools/etcd-dump-db/backend.go | 2 +- .../functional-tester/etcd-tester/stresser.go | 3 - .../github.com/coreos/etcd/version/version.go | 2 +- .../grpc-ecosystem/grpc-gateway/.travis.yml | 16 +- .../grpc-ecosystem/grpc-gateway/CHANGELOG.md | 169 ++ .../grpc-gateway/CONTRIBUTING.md | 2 +- .../grpc-ecosystem/grpc-gateway/Makefile | 41 +- .../grpc-ecosystem/grpc-gateway/README.md | 41 +- .../examples/browser/package.json | 2 +- .../grpc-gateway/examples/client_test.go | 10 +- .../examples/clients/abe/.gitignore | 1 + .../clients/abe/.swagger-codegen-ignore | 1 + .../clients/abe/ABitOfEverythingNested.go | 11 - .../clients/abe/ABitOfEverythingServiceApi.go | 648 ----- .../clients/abe/ExamplepbABitOfEverything.go | 35 - .../clients/abe/ExamplepbNumericEnum.go | 8 - .../examples/clients/abe/NestedDeepEnum.go | 8 - .../examples/clients/abe/ProtobufDuration.go | 10 + .../examples/clients/abe/ProtobufEmpty.go | 8 - .../examples/clients/abe/Sub2IdMessage.go | 9 - .../examples/clients/abe/SubStringMessage.go | 9 - .../clients/abe/a_bit_of_everything_nested.go | 22 + .../abe/a_bit_of_everything_service_api.go | 798 ++++++ .../examples/clients/abe/api_client.go | 164 ++ .../examples/clients/abe/api_response.go | 44 + .../examples/clients/abe/configuration.go | 67 + .../abe/examplepb_a_bit_of_everything.go | 72 + .../clients/abe/examplepb_numeric_enum.go | 15 + .../examples/clients/abe/nested_deep_enum.go | 15 + .../examples/clients/abe/protobuf_empty.go | 15 + .../clients/abe/sub_string_message.go | 16 + .../examples/clients/echo/.gitignore | 1 + .../clients/echo/.swagger-codegen-ignore | 1 + .../examples/clients/echo/EchoServiceApi.go | 145 -- .../clients/echo/ExamplepbSimpleMessage.go | 9 - .../examples/clients/echo/api_client.go | 164 ++ 
.../examples/clients/echo/api_response.go | 44 + .../examples/clients/echo/configuration.go | 67 + .../examples/clients/echo/echo_service_api.go | 224 ++ .../clients/echo/examplepb_simple_message.go | 20 + .../examplepb/a_bit_of_everything.pb.go | 490 +++- .../examplepb/a_bit_of_everything.pb.gw.go | 265 +- .../examplepb/a_bit_of_everything.proto | 78 + .../a_bit_of_everything.swagger.json | 457 +++- .../examples/examplepb/echo_service.pb.go | 46 +- .../examples/examplepb/echo_service.pb.gw.go | 118 +- .../examples/examplepb/echo_service.proto | 4 + .../examplepb/echo_service.swagger.json | 39 +- .../examples/examplepb/flow_combination.pb.go | 49 +- .../examplepb/flow_combination.pb.gw.go | 403 +-- .../examples/examplepb/stream.pb.go | 7 +- .../examples/examplepb/stream.pb.gw.go | 44 +- .../grpc-gateway/examples/integration_test.go | 137 +- .../grpc-gateway/examples/main.go | 3 +- .../grpc-gateway/examples/proto_error_test.go | 170 ++ .../examples/server/a_bit_of_everything.go | 29 +- .../grpc-gateway/examples/sub/message.pb.go | 5 +- .../grpc-gateway/examples/sub2/message.pb.go | 12 +- .../descriptor/registry.go | 36 +- .../descriptor/registry_test.go | 18 + .../descriptor/services.go | 15 +- .../descriptor/services_test.go | 101 + .../gengateway/generator.go | 19 +- .../gengateway/generator_test.go | 88 + .../gengateway/template.go | 69 +- .../protoc-gen-grpc-gateway/httprule/parse.go | 2 +- .../protoc-gen-grpc-gateway/main.go | 7 +- .../protoc-gen-swagger/genswagger/template.go | 455 +++- .../genswagger/template_test.go | 294 ++- .../protoc-gen-swagger/genswagger/types.go | 86 +- .../grpc-gateway/protoc-gen-swagger/main.go | 67 +- .../protoc-gen-swagger/main_test.go | 129 + .../options/annotations.pb.go | 83 + .../options/annotations.proto | 37 + .../options/openapiv2.pb.go | 739 ++++++ .../options/openapiv2.proto | 223 ++ .../grpc-gateway/runtime/context.go | 64 +- .../grpc-gateway/runtime/context_test.go | 29 +- .../grpc-gateway/runtime/errors.go | 20 +- .../grpc-gateway/runtime/errors_test.go | 7 +- .../grpc-gateway/runtime/handler.go | 35 +- .../grpc-gateway/runtime/handler_test.go | 67 + .../runtime/internal/stream_chunk.pb.go | 33 +- .../runtime/marshal_jsonpb_test.go | 30 +- .../grpc-gateway/runtime/mux.go | 144 +- .../grpc-gateway/runtime/pattern.go | 2 +- .../grpc-gateway/runtime/proto_errors.go | 61 + .../grpc-gateway/runtime/query.go | 165 +- .../grpc-gateway/runtime/query_test.go | 490 +++- ...EADME.grcp-gateway => README.grpc-gateway} | 0 .../googleapis/google/api/annotations.pb.go | 61 - .../googleapis/google/api/annotations.proto | 2 + .../googleapis/google/api/http.pb.go | 360 --- .../googleapis/google/api/http.proto | 228 +- .../grpc/.github/ISSUE_TEMPLATE | 14 + vendor/google.golang.org/grpc/.please-update | 0 vendor/google.golang.org/grpc/.travis.yml | 19 +- vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 60 +- .../grpc/Documentation/gomock-example.md | 73 +- .../grpc/Documentation/grpc-auth-support.md | 2 +- .../grpc/Documentation/grpc-metadata.md | 12 +- vendor/google.golang.org/grpc/LICENSE | 230 +- vendor/google.golang.org/grpc/Makefile | 13 +- vendor/google.golang.org/grpc/PATENTS | 22 - vendor/google.golang.org/grpc/README.md | 12 +- vendor/google.golang.org/grpc/backoff.go | 18 + vendor/google.golang.org/grpc/backoff_test.go | 18 + vendor/google.golang.org/grpc/balancer.go | 66 +- .../grpc/balancer/balancer.go | 206 ++ .../grpc/balancer/roundrobin/roundrobin.go | 241 ++ .../balancer/roundrobin/roundrobin_test.go | 470 
++++ .../grpc/balancer_conn_wrappers.go | 252 ++ .../google.golang.org/grpc/balancer_test.go | 498 +++- .../grpc/balancer_v1_wrapper.go | 367 +++ .../grpc/benchmark/benchmain/main.go | 499 ++++ .../grpc/benchmark/benchmark.go | 180 +- .../grpc/benchmark/benchmark_test.go | 253 +- .../grpc/benchmark/benchresult/main.go | 133 + .../grpc/benchmark/client/main.go | 18 + .../grpc/benchmark/grpc_testing/control.pb.go | 373 ++- .../grpc/benchmark/grpc_testing/control.proto | 35 +- .../benchmark/grpc_testing/messages.pb.go | 222 +- .../benchmark/grpc_testing/messages.proto | 35 +- .../benchmark/grpc_testing/payloads.pb.go | 57 +- .../benchmark/grpc_testing/payloads.proto | 35 +- .../benchmark/grpc_testing/services.pb.go | 35 +- .../benchmark/grpc_testing/services.proto | 35 +- .../grpc/benchmark/grpc_testing/stats.pb.go | 149 +- .../grpc/benchmark/grpc_testing/stats.proto | 35 +- .../grpc/benchmark/latency/latency.go | 316 +++ .../grpc/benchmark/latency/latency_test.go | 353 +++ .../benchmark/primitives/primitives_test.go | 235 ++ .../grpc/benchmark/server/main.go | 18 + .../grpc/benchmark/server/testdata/ca.pem | 15 - .../benchmark/server/testdata/server1.key | 16 - .../benchmark/server/testdata/server1.pem | 16 - .../grpc/benchmark/stats/histogram.go | 30 +- .../grpc/benchmark/stats/stats.go | 187 +- .../grpc/benchmark/stats/util.go | 25 +- .../grpc/benchmark/worker/benchmark_client.go | 44 +- .../grpc/benchmark/worker/benchmark_server.go | 50 +- .../grpc/benchmark/worker/main.go | 35 +- .../grpc/benchmark/worker/util.go | 79 +- vendor/google.golang.org/grpc/call.go | 133 +- vendor/google.golang.org/grpc/call_test.go | 49 +- vendor/google.golang.org/grpc/clientconn.go | 970 ++++---- .../google.golang.org/grpc/clientconn_test.go | 133 +- vendor/google.golang.org/grpc/codec.go | 40 +- .../grpc/codec_benchmark_test.go | 35 +- vendor/google.golang.org/grpc/codec_test.go | 35 +- .../grpc/codes/code_string.go | 4 +- vendor/google.golang.org/grpc/codes/codes.go | 37 +- .../grpc/connectivity/connectivity.go | 72 + vendor/google.golang.org/grpc/coverage.sh | 48 - .../grpc/credentials/credentials.go | 47 +- .../grpc/credentials/credentials_test.go | 44 +- .../grpc/credentials/credentials_util_go17.go | 35 +- .../grpc/credentials/credentials_util_go18.go | 35 +- .../credentials/credentials_util_pre_go17.go | 35 +- .../grpc/credentials/oauth/oauth.go | 65 +- vendor/google.golang.org/grpc/doc.go | 20 +- .../google.golang.org/grpc/examples/README.md | 11 +- .../grpc/examples/gotutorial.md | 20 +- .../helloworld/greeter_client/main.go | 35 +- .../helloworld/greeter_server/main.go | 37 +- .../helloworld/helloworld/helloworld.pb.go | 29 +- .../helloworld/helloworld/helloworld.proto | 35 +- .../{mock => }/mock_helloworld/hw_mock.go | 0 .../hw_mock_test.go} | 22 +- .../grpc/examples/route_guide/README.md | 4 +- .../examples/route_guide/client/client.go | 93 +- .../route_guide/mock_routeguide/rg_mock.go | 200 ++ .../mock_routeguide/rg_mock_test.go | 82 + .../route_guide/routeguide/route_guide.pb.go | 109 +- .../route_guide/routeguide/route_guide.proto | 35 +- .../examples/route_guide/server/server.go | 62 +- .../grpc/examples/route_guide/testdata/ca.pem | 15 - .../examples/route_guide/testdata/server1.key | 16 - .../examples/route_guide/testdata/server1.pem | 16 - vendor/google.golang.org/grpc/go16.go | 56 - vendor/google.golang.org/grpc/go17.go | 55 - vendor/google.golang.org/grpc/grpclb.go | 243 +- .../grpc/grpclb/grpc_lb_v1/doc.go | 21 + .../{grpclb.pb.go => messages/messages.pb.go} | 118 +- .../{grpclb.proto => 
messages/messages.proto} | 50 +- .../grpclb/grpc_lb_v1/service/service.pb.go | 154 ++ .../grpclb/grpc_lb_v1/service/service.proto | 26 + .../grpc/grpclb/grpclb_server_generated.go | 54 - .../grpc/grpclb/grpclb_test.go | 289 +-- .../grpc/grpclog/glogger/glogger.go | 94 +- .../google.golang.org/grpc/grpclog/grpclog.go | 123 + .../google.golang.org/grpc/grpclog/logger.go | 106 +- .../grpc/grpclog/loggerv2.go | 195 ++ .../grpc/grpclog/loggerv2_test.go | 62 + .../grpc/health/grpc_health_v1/health.pb.go | 54 +- .../grpc/health/grpc_health_v1/health.proto | 14 + .../google.golang.org/grpc/health/health.go | 20 + vendor/google.golang.org/grpc/interceptor.go | 41 +- .../grpc/internal/internal.go | 35 +- .../grpc/interop/client/client.go | 46 +- .../grpc/interop/client/testdata/ca.pem | 15 - .../grpc/interop/client/testdata/server1.key | 16 - .../grpc/interop/client/testdata/server1.pem | 16 - .../grpc/interop/grpc_testing/test.pb.go | 97 +- .../grpc/interop/grpc_testing/test.proto | 14 + .../interop/http2/negative_http2_client.go | 35 +- .../grpc/interop/server/server.go | 46 +- .../grpc/interop/server/testdata/ca.pem | 15 - .../grpc/interop/server/testdata/server1.key | 16 - .../grpc/interop/server/testdata/server1.pem | 16 - .../grpc/interop/test_utils.go | 39 +- .../grpc/keepalive/keepalive.go | 39 +- .../grpc/metadata/metadata.go | 87 +- .../grpc/metadata/metadata_test.go | 35 +- .../grpc/naming/dns_resolver.go | 290 +++ .../grpc/naming/dns_resolver_test.go | 315 +++ vendor/google.golang.org/grpc/naming/go17.go | 34 + .../grpc/naming/go17_test.go | 42 + vendor/google.golang.org/grpc/naming/go18.go | 28 + .../grpc/naming/go18_test.go | 41 + .../google.golang.org/grpc/naming/naming.go | 35 +- vendor/google.golang.org/grpc/peer/peer.go | 38 +- .../google.golang.org/grpc/picker_wrapper.go | 141 ++ .../grpc/picker_wrapper_test.go | 160 ++ vendor/google.golang.org/grpc/pickfirst.go | 95 + .../google.golang.org/grpc/pickfirst_test.go | 352 +++ vendor/google.golang.org/grpc/proxy.go | 38 +- vendor/google.golang.org/grpc/proxy_test.go | 90 +- .../grpc_reflection_v1alpha/reflection.pb.go | 169 +- .../grpc_reflection_v1alpha/reflection.proto | 37 +- .../grpc/reflection/grpc_testing/proto2.pb.go | 5 +- .../grpc/reflection/grpc_testing/proto2.proto | 14 + .../reflection/grpc_testing/proto2_ext.pb.go | 5 +- .../reflection/grpc_testing/proto2_ext.proto | 14 + .../reflection/grpc_testing/proto2_ext2.pb.go | 5 +- .../reflection/grpc_testing/proto2_ext2.proto | 14 + .../grpc/reflection/grpc_testing/test.pb.go | 5 +- .../grpc/reflection/grpc_testing/test.proto | 14 + .../reflection/grpc_testingv3/testv3.pb.go | 236 ++ .../reflection/grpc_testingv3/testv3.proto | 21 + .../grpc/reflection/serverreflection.go | 58 +- .../grpc/reflection/serverreflection_test.go | 68 +- .../grpc/resolver/dns/dns_resolver.go | 379 +++ .../grpc/resolver/dns/dns_resolver_test.go | 898 +++++++ .../grpc/resolver/dns/go17.go | 35 + .../grpc/resolver/dns/go17_test.go | 52 + .../grpc/resolver/dns/go18.go | 29 + .../grpc/resolver/dns/go18_test.go | 51 + .../grpc/resolver/manual/manual.go | 80 + .../grpc/resolver/passthrough/passthrough.go | 58 + .../grpc/resolver/resolver.go | 143 ++ .../grpc/resolver_conn_wrapper.go | 139 ++ .../grpc/resolver_conn_wrapper_test.go | 47 + vendor/google.golang.org/grpc/rpc_util.go | 323 ++- .../google.golang.org/grpc/rpc_util_test.go | 107 +- vendor/google.golang.org/grpc/server.go | 540 ++-- vendor/google.golang.org/grpc/server_test.go | 39 +- .../grpc/stats/grpc_testing/test.pb.go | 184 +- 
.../grpc/stats/grpc_testing/test.proto | 20 + .../google.golang.org/grpc/stats/handlers.go | 42 +- vendor/google.golang.org/grpc/stats/stats.go | 147 +- .../grpc/stats/stats_test.go | 449 +++- .../google.golang.org/grpc/status/status.go | 73 +- .../grpc/status/status_test.go | 182 +- vendor/google.golang.org/grpc/stream.go | 256 +- .../grpc/stress/client/main.go | 47 +- .../grpc/stress/client/testdata/ca.pem | 15 - .../grpc/stress/client/testdata/server1.key | 16 - .../grpc/stress/client/testdata/server1.pem | 16 - .../grpc/stress/grpc_testing/metrics.pb.go | 53 +- .../grpc/stress/grpc_testing/metrics.proto | 37 +- .../grpc/stress/metrics_client/main.go | 35 +- vendor/google.golang.org/grpc/tap/tap.go | 55 +- .../grpc/test/bufconn/bufconn.go | 229 ++ .../grpc/test/bufconn/bufconn_test.go | 149 ++ .../grpc/test/codec_perf/perf.pb.go | 21 +- .../grpc/test/codec_perf/perf.proto | 14 + .../grpc/test/end2end_test.go | 2207 +++++++++++++---- .../grpc/test/grpc_testing/test.pb.go | 86 +- .../grpc/test/grpc_testing/test.proto | 14 + .../grpc/test/leakcheck/leakcheck.go | 118 + .../grpc/test/leakcheck/leakcheck_test.go | 76 + vendor/google.golang.org/grpc/test/race.go | 24 + .../google.golang.org/grpc/test/race_test.go | 39 - .../{servertester_test.go => servertester.go} | 43 +- .../grpc/test/testdata/ca.pem | 15 - .../grpc/test/testdata/server1.key | 16 - .../grpc/test/testdata/server1.pem | 16 - .../grpc/testdata/testdata.go | 63 + vendor/google.golang.org/grpc/trace.go | 50 +- .../grpc/transport/bdp_estimator.go | 143 ++ .../grpc/transport/control.go | 174 +- .../google.golang.org/grpc/transport/go16.go | 46 - .../google.golang.org/grpc/transport/go17.go | 46 - .../grpc/transport/handler_server.go | 107 +- .../grpc/transport/handler_server_test.go | 140 +- .../grpc/transport/http2_client.go | 936 +++---- .../grpc/transport/http2_server.go | 896 ++++--- .../grpc/transport/http_util.go | 288 +-- .../grpc/transport/http_util_test.go | 35 +- .../google.golang.org/grpc/transport/log.go | 50 + .../grpc/transport/testdata/ca.pem | 15 - .../grpc/transport/testdata/server1.key | 16 - .../grpc/transport/testdata/server1.pem | 16 - .../grpc/transport/transport.go | 357 ++- .../grpc/transport/transport_test.go | 1162 ++++++++- vendor/google.golang.org/grpc/vet.sh | 78 + 557 files changed, 36420 insertions(+), 11875 deletions(-) delete mode 100644 vendor/github.com/boltdb/bolt/Makefile delete mode 100644 vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go delete mode 100644 vendor/github.com/boltdb/bolt/freelist_test.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/.gitignore (65%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/LICENSE (100%) create mode 100644 vendor/github.com/coreos/bbolt/Makefile rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/README.md (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/appveyor.yml (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_386.go (72%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_amd64.go (73%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_arm.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_arm64.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_linux.go (100%) create mode 100644 vendor/github.com/coreos/bbolt/bolt_mips64x.go rename vendor/github.com/{boltdb/bolt/bolt_arm.go => coreos/bbolt/bolt_mipsx.go} (55%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_openbsd.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc.go 
(100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_ppc64le.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_s390x.go (74%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix.go (80%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_unix_solaris.go (75%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bolt_windows.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/boltsync_unix.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bucket.go (95%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/bucket_test.go (95%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/cmd/bolt/main.go (70%) create mode 100644 vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/cursor.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/cursor_test.go (99%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/db.go (85%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/db_test.go (85%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/doc.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/errors.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/freelist.go (56%) create mode 100644 vendor/github.com/coreos/bbolt/freelist_test.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/node.go (99%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/node_test.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/page.go (88%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/page_test.go (100%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/quick_test.go (89%) create mode 100644 vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/simulation_test.go (67%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/tx.go (94%) rename vendor/github.com/{boltdb/bolt => coreos/bbolt}/tx_test.go (81%) create mode 100755 vendor/github.com/coreos/etcd/.semaphore.sh create mode 100644 vendor/github.com/coreos/etcd/Dockerfile-test delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer_test.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/example_auth_test.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/grpc_options.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/integration/black_hole_test.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/integration/network_partition_test.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/integration/server_shutdown_test.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/integration/util.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go create mode 100644 vendor/github.com/coreos/etcd/hack/patch/README.md create mode 100755 vendor/github.com/coreos/etcd/hack/patch/cherrypick.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/Makefile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/README create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/Dockerfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/Procfile create mode 100644 
vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/etcd.zone create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/named.conf create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/rdns.zone create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/resolv.conf create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/Dockerfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca.crt create mode 100644 
vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/Procfile create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencert.json create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencerts.sh create mode 100755 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/run.sh create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server-ca-csr.json create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.crt create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.key.insecure create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/etcd.zone create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/named.conf create mode 100644 
vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/rdns.zone create mode 100644 vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/resolv.conf create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/CHANGELOG.md create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.swagger-codegen-ignore delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingNested.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingServiceApi.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbABitOfEverything.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbNumericEnum.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/NestedDeepEnum.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufDuration.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufEmpty.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/Sub2IdMessage.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/SubStringMessage.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_nested.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_service_api.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_client.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_response.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/configuration.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_a_bit_of_everything.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_numeric_enum.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/nested_deep_enum.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/protobuf_empty.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/sub_string_message.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.gitignore create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.swagger-codegen-ignore delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/EchoServiceApi.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/ExamplepbSimpleMessage.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_client.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_response.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/configuration.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/echo_service_api.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/examplepb_simple_message.go create mode 100644 
vendor/github.com/grpc-ecosystem/grpc-gateway/examples/proto_error_test.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator_test.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main_test.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler_test.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go rename vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/{README.grcp-gateway => README.grpc-gateway} (100%) delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go create mode 100644 vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE create mode 100644 vendor/google.golang.org/grpc/.please-update create mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/PATENTS create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchmain/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/benchresult/main.go create mode 100644 vendor/google.golang.org/grpc/benchmark/latency/latency.go create mode 100644 vendor/google.golang.org/grpc/benchmark/latency/latency_test.go create mode 100644 vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go delete mode 100644 vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100755 vendor/google.golang.org/grpc/coverage.sh rename vendor/google.golang.org/grpc/examples/helloworld/{mock => }/mock_helloworld/hw_mock.go (100%) rename vendor/google.golang.org/grpc/examples/helloworld/{mock/hw_test.go => mock_helloworld/hw_mock_test.go} (62%) create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go create mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem delete mode 100644 vendor/google.golang.org/grpc/go16.go delete mode 100644 vendor/google.golang.org/grpc/go17.go create mode 100644 
vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.pb.go => messages/messages.pb.go} (79%) rename vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/{grpclb.proto => messages/messages.proto} (71%) create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go create mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2_test.go delete mode 100644 vendor/google.golang.org/grpc/interop/client/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/interop/client/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/interop/client/testdata/server1.pem mode change 100755 => 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go delete mode 100644 vendor/google.golang.org/grpc/interop/server/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/interop/server/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/interop/server/testdata/server1.pem create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver_test.go create mode 100644 vendor/google.golang.org/grpc/naming/go17.go create mode 100644 vendor/google.golang.org/grpc/naming/go17_test.go create mode 100644 vendor/google.golang.org/grpc/naming/go18.go create mode 100644 vendor/google.golang.org/grpc/naming/go18_test.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper_test.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/pickfirst_test.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18_test.go create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go delete mode 100644 vendor/google.golang.org/grpc/stress/client/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/stress/client/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/stress/client/testdata/server1.pem create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go create mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go create mode 100644 vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go create mode 100644 
vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go create mode 100644 vendor/google.golang.org/grpc/test/race.go delete mode 100644 vendor/google.golang.org/grpc/test/race_test.go rename vendor/google.golang.org/grpc/test/{servertester_test.go => servertester.go} (80%) delete mode 100644 vendor/google.golang.org/grpc/test/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/test/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/test/testdata/server1.pem create mode 100644 vendor/google.golang.org/grpc/testdata/testdata.go create mode 100644 vendor/google.golang.org/grpc/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/transport/log.go delete mode 100644 vendor/google.golang.org/grpc/transport/testdata/ca.pem delete mode 100644 vendor/google.golang.org/grpc/transport/testdata/server1.key delete mode 100644 vendor/google.golang.org/grpc/transport/testdata/server1.pem create mode 100755 vendor/google.golang.org/grpc/vet.sh diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 089c4b39642a..0c4371136daf 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -655,363 +655,363 @@ }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/auth", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/embed", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": 
"121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": 
"v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/integration", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/cors", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/debugutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + 
"Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/osutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/srv", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/testutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": 
"121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/cache", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Comment": "v3.2.8", - "Rev": "e211fb6de3cb306aee245422e599ac521f6e21f7" + "Comment": "v3.2.16", + "Rev": "121edf0467052d55876a817b89875fb39a99bf78" }, { "ImportPath": "github.com/coreos/go-iptables/iptables", diff --git a/glide.lock b/glide.lock index e2fdc3932c2b..bbff109c8f3d 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 1360d8b9f1a8f475ada0a321c4ddbe7ad4824f830a4121dc47e0a04535a2d332 -updated: 2018-02-14T15:46:31.627992096-05:00 +hash: 8a470a862eecb96f2c1ad49b191200abd8181529cd5e39f0aa397c0d19d9a1a2 +updated: 2018-02-19T11:07:22.018677+01:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -94,8 +94,6 @@ imports: - quantile - name: github.com/blang/semver version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 -- name: github.com/boltdb/bolt - version: 583e8937c61f1af6513608ccc75c97b6abdf4ff9 - name: github.com/certifi/gocertifi version: deb3ae2ef2610fde3330947281941c562861188b - name: github.com/chai2010/gettext-go @@ -181,8 +179,10 @@ imports: version: 47536c89fcc545a87745e1a1573addc439409165 subpackages: - pkg/homedir +- name: github.com/coreos/bbolt + version: 32c383e75ce054674c53b5a07e55de85332aee14 - name: github.com/coreos/etcd - version: e211fb6de3cb306aee245422e599ac521f6e21f7 + version: 121edf0467052d55876a817b89875fb39a99bf78 subpackages: - alarm - auth @@ -490,6 +490,7 @@ imports: 
subpackages: - jsonpb - proto + - protoc-gen-go/descriptor - ptypes - ptypes/any - ptypes/duration @@ -641,7 +642,7 @@ imports: - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 2500245aa6110c562d17020fb31a2c133d737799 - name: github.com/grpc-ecosystem/grpc-gateway - version: 84398b94e188ee336f307779b57b3aa91af7063c + version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104 subpackages: - runtime - runtime/internal @@ -947,6 +948,7 @@ imports: - template/clientset/versioned - template/clientset/versioned/scheme - template/clientset/versioned/typed/template/v1 + - template/clientset/versioned/typed/template/v1/fake - template/informers/externalversions - template/informers/externalversions/internalinterfaces - template/informers/externalversions/template @@ -1267,19 +1269,24 @@ imports: - name: google.golang.org/genproto version: 09f6ed296fc66555a25fe4ce95173148778dfa85 subpackages: + - googleapis/api/annotations - googleapis/rpc/status - name: google.golang.org/grpc - version: d2e1b51f33ff8c5e4a15560ff049d200e83726c5 + version: 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e subpackages: + - balancer - codes + - connectivity - credentials - - grpclb/grpc_lb_v1 + - grpclb/grpc_lb_v1/messages - grpclog + - health/grpc_health_v1 - internal - keepalive - metadata - naming - peer + - resolver - stats - status - tap @@ -1303,7 +1310,7 @@ imports: - name: gopkg.in/yaml.v2 version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 - name: k8s.io/api - version: 78d52114c359511e7d0d5af4fc50148b5392d7b4 + version: 30a449993d4d38589fa79362431bd9b04644cab5 repo: git@github.com:openshift/kubernetes-api subpackages: - admission/v1beta1 @@ -1337,7 +1344,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 910bc45a64fb0fe0846c1ec00fe9f0703ddaa314 + version: 48f5e87c5f0ae81d3ec5523fd409a702b0c9c3ee repo: git@github.com:openshift/kubernetes-apiextensions-apiserver subpackages: - pkg/apis/apiextensions @@ -1369,7 +1376,7 @@ imports: - pkg/registry/customresource - pkg/registry/customresourcedefinition - name: k8s.io/apimachinery - version: 7c9cf22bb3b216a520f072212a0ee84c20b0ae0f + version: 12746011c32507d61f9ac778e75233a44a2d268a repo: git@github.com:openshift/kubernetes-apimachinery subpackages: - pkg/api/equality @@ -1435,7 +1442,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 093dc91208134c6033229e0a502cbd579852f645 + version: 017dc6a13fdba5ec7b11dd06ef8018266244447a repo: git@github.com:openshift/kubernetes-apiserver subpackages: - pkg/admission @@ -1544,7 +1551,7 @@ imports: - plugin/pkg/authenticator/token/webhook - plugin/pkg/authorizer/webhook - name: k8s.io/client-go - version: 5029a91c3b199abd476215b9930070585c70d62b + version: 1502c0d02760ce07bc53f5f4656a4878566b1ef7 repo: git@github.com:openshift/kubernetes-client-go subpackages: - discovery @@ -1722,7 +1729,7 @@ imports: - util/testing - util/workqueue - name: k8s.io/code-generator - version: 517f776cbba8525815ebbd787a53004844479497 + version: 4a42db67724ec4bf63a6ff162e97543e0e312770 repo: git@github.com:openshift/kubernetes-code-generator - name: k8s.io/gengo version: c2e273a26d3fc59b60cf6de2fb80eace7402de6e @@ -1732,7 +1739,7 @@ imports: subpackages: - metrics/api/v1/types - name: k8s.io/kube-aggregator - version: 9ef7aa49fa5f75a8fb80b681c06b793f8fafbe1a + version: f02af3e9b7d164a6633c2014c9359ee918ff9b65 repo: git@github.com:openshift/kube-aggregator subpackages: - pkg/apis/apiregistration @@ -2515,7 +2522,7 @@ imports: - 
third_party/forked/gonum/graph/simple - third_party/forked/gonum/graph/traverse - name: k8s.io/metrics - version: a7339a9902a766bceabf8a52a1ab2a9dc0371703 + version: ff454b3dae7059eae17d532cc35bfc478e20a88e repo: git@github.com:openshift/kubernetes-metrics subpackages: - pkg/apis/custom_metrics diff --git a/glide.yaml b/glide.yaml index 02247ffb223b..9205dc56efa3 100644 --- a/glide.yaml +++ b/glide.yaml @@ -93,7 +93,11 @@ import: version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 # master - package: github.com/coreos/etcd - version: v3.2.8 + version: v3.2.16 +- package: google.golang.org/grpc + version: v1.7.5 +- package: github.com/grpc-ecosystem/grpc-gateway + version: v1.3.0 # cli - package: github.com/gonum/blas version: 37e82626499e1df7c54aeaba0959fd6e7e8dc1e4 diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adcd7..000000000000 --- a/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go b/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go deleted file mode 100644 index c378b790d551..000000000000 --- a/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package main_test - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/boltdb/bolt" - "github.com/boltdb/bolt/cmd/bolt" -) - -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := MustOpen(0666, nil) - db.DB.Close() - defer db.Close() - - // Run the info command. - m := NewMain() - if err := m.Run("info", db.Path); err != nil { - t.Fatal(err) - } -} - -// Ensure the "stats" command executes correctly with an empty database. -func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 0 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 0\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 0\n" + - "\tNumber of levels in B+tree: 0\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 0\n" + - "\tBytes actually used for leaf data: 0 (0%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 0\n" + - "\tTotal number on inlined buckets: 0 (0%)\n" + - "\tBytes used for inlined buckets: 0 (0%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Ensure the "stats" command can execute correctly. 
-func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - -// MustOpen creates a Bolt database in a temporary location. -func MustOpen(mode os.FileMode, options *bolt.Options) *DB { - // Create temporary path. - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} diff --git a/vendor/github.com/boltdb/bolt/freelist_test.go b/vendor/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 4e9b3a8dbfa9..000000000000 --- a/vendor/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. 
-func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. -func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. -func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} - f.pending[100] = []pgid{28, 11} - f.pending[101] = []pgid{3} - p := (*page)(unsafe.Pointer(&buf[0])) - if err := f.write(p); err != nil { - t.Fatal(err) - } - - // Read the page back out. - f2 := newFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. 
- if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { - t.Fatalf("exp=%v; got=%v", exp, f2.ids) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} - f.release(1) - } -} - -func randomPgids(n int) []pgid { - rand.Seed(42) - pgids := make(pgids, n) - for i := range pgids { - pgids[i] = pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore similarity index 65% rename from vendor/github.com/boltdb/bolt/.gitignore rename to vendor/github.com/coreos/bbolt/.gitignore index c7bd2b7a5b84..c2a8cfa788c0 100644 --- a/vendor/github.com/boltdb/bolt/.gitignore +++ b/vendor/github.com/coreos/bbolt/.gitignore @@ -2,3 +2,4 @@ *.test *.swp /bin/ +cmd/bolt/bolt diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE similarity index 100% rename from vendor/github.com/boltdb/bolt/LICENSE rename to vendor/github.com/coreos/bbolt/LICENSE diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile new file mode 100644 index 000000000000..43b94f3bdfe8 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/Makefile @@ -0,0 +1,30 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... 
+ +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt + +test: + go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + go test -v ./cmd/bolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/coreos/bbolt/README.md similarity index 88% rename from vendor/github.com/boltdb/bolt/README.md rename to vendor/github.com/coreos/bbolt/README.md index 8523e3377344..015f0efbe845 100644 --- a/vendor/github.com/boltdb/bolt/README.md +++ b/vendor/github.com/coreos/bbolt/README.md @@ -1,6 +1,16 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +bbolt ==== +[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt) +[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] [LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database @@ -10,16 +20,18 @@ Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. ## Table of Contents @@ -59,7 +71,7 @@ services every day. To start using Bolt, install Go and run `go get`: ```sh -$ go get github.com/boltdb/bolt/... +$ go get github.com/coreos/bbolt/... 
``` This will retrieve the library and install the `bolt` command line utility into @@ -79,7 +91,7 @@ package main import ( "log" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func main() { @@ -209,7 +221,7 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but **please** be sure to close +You can use the `DB.Begin()` function directly but **please** be sure to close the transaction. ```go @@ -395,7 +407,7 @@ db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } @@ -448,6 +460,10 @@ db.View(func(tx *bolt.Tx) error { }) ``` +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. ### Nested buckets @@ -460,6 +476,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + ### Database backups @@ -469,7 +534,7 @@ this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx) documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to @@ -715,6 +780,9 @@ Here are a few things to note when evaluating and using Bolt: can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. 
+* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. @@ -755,7 +823,7 @@ Here are a few things to note when evaluating and using Bolt: ## Reading the Source -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, transactional key/value database so it can be a good starting point for people interested in how databases work. @@ -848,5 +916,13 @@ Below is a list of public, open source projects that use Bolt: * [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml similarity index 100% rename from vendor/github.com/boltdb/bolt/appveyor.yml rename to vendor/github.com/coreos/bbolt/appveyor.yml diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go similarity index 72% rename from vendor/github.com/boltdb/bolt/bolt_386.go rename to vendor/github.com/coreos/bbolt/bolt_386.go index e659bfb91f33..820d533c15fa 100644 --- a/vendor/github.com/boltdb/bolt/bolt_386.go +++ b/vendor/github.com/coreos/bbolt/bolt_386.go @@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go similarity index 73% rename from vendor/github.com/boltdb/bolt/bolt_amd64.go rename to vendor/github.com/coreos/bbolt/bolt_amd64.go index cca6b7eb7070..98fafdb47d86 100644 --- a/vendor/github.com/boltdb/bolt/bolt_amd64.go +++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go @@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go new file mode 100644 index 000000000000..7e5cb4b94128 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_arm64.go rename to vendor/github.com/coreos/bbolt/bolt_arm64.go index 6d2309352e06..b26d84f91ba7 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm64.go +++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_linux.go rename to vendor/github.com/coreos/bbolt/bolt_linux.go diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go new file mode 100644 index 000000000000..134b578bd447 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go similarity index 55% rename from vendor/github.com/boltdb/bolt/bolt_arm.go rename to vendor/github.com/coreos/bbolt/bolt_mipsx.go index e659bfb91f33..d5ecb0597e45 100644 --- a/vendor/github.com/boltdb/bolt/bolt_arm.go +++ b/vendor/github.com/coreos/bbolt/bolt_mipsx.go @@ -1,7 +1,12 @@ +// +build mips mipsle + package bolt // maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB +const maxMapSize = 0x40000000 // 1GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_openbsd.go rename to vendor/github.com/coreos/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_ppc.go rename to vendor/github.com/coreos/bbolt/bolt_ppc.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_ppc64.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64.go index 2dc6be02e3e3..9331d9771eb3 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_ppc64le.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64le.go index 8351e129f6a3..8c143bc5d194 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go similarity index 74% rename from vendor/github.com/boltdb/bolt/bolt_s390x.go rename to vendor/github.com/coreos/bbolt/bolt_s390x.go index f4dd26bbba7c..d7c39af92539 100644 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go similarity index 80% rename from vendor/github.com/boltdb/bolt/bolt_unix.go rename to vendor/github.com/coreos/bbolt/bolt_unix.go index cad62dda1e38..06592a08089f 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix.go @@ -13,29 +13,32 @@ import ( // flock acquires an advisory lock on a file descriptor. 
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_unix_solaris.go rename to vendor/github.com/coreos/bbolt/bolt_unix_solaris.go index 307bf2b3ee97..fd8335ecc963 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go @@ -13,34 +13,33 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. 
- time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go similarity index 88% rename from vendor/github.com/boltdb/bolt/bolt_windows.go rename to vendor/github.com/coreos/bbolt/bolt_windows.go index d538e6afd77a..ca6f9a11c241 100644 --- a/vendor/github.com/boltdb/bolt/bolt_windows.go +++ b/vendor/github.com/coreos/bbolt/bolt_windows.go @@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro db.lockfile = f var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := f.Fd() + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + // Attempt to obtain an exclusive lock. + err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{}) if err == nil { return nil } else if err != errLockViolation { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } @@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro func funlock(db *DB) error { err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) db.lockfile.Close() - os.Remove(db.path+lockExt) + os.Remove(db.path + lockExt) return err } diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go similarity index 100% rename from vendor/github.com/boltdb/bolt/boltsync_unix.go rename to vendor/github.com/coreos/bbolt/boltsync_unix.go diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go similarity index 95% rename from vendor/github.com/boltdb/bolt/bucket.go rename to vendor/github.com/coreos/bbolt/bucket.go index d2f8c524e42f..44db88b8abde 100644 --- a/vendor/github.com/boltdb/bolt/bucket.go +++ b/vendor/github.com/coreos/bbolt/bucket.go @@ -14,13 +14,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( @@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. 
- if b.tx.writable { + if b.tx.writable && !unaligned { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue } + return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - _, _, flags := c.seek(key) + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error { return nil } +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/boltdb/bolt/bucket_test.go b/vendor/github.com/coreos/bbolt/bucket_test.go similarity index 95% rename from vendor/github.com/boltdb/bolt/bucket_test.go rename to vendor/github.com/coreos/bbolt/bucket_test.go index 528fec24dedf..b7ce32c130ac 100644 --- a/vendor/github.com/boltdb/bolt/bucket_test.go +++ b/vendor/github.com/coreos/bbolt/bucket_test.go @@ -13,7 +13,7 @@ import ( "testing" "testing/quick" - "github.com/boltdb/bolt" + "github.com/coreos/bbolt" ) // Ensure that a bucket that gets a non-existent key returns nil. @@ -113,6 +113,7 @@ func TestBucket_Get_Capacity(t *testing.T) { // Ensure slice can be appended to without a segfault. k = append(k, []byte("123")...) v = append(v, []byte("123")...) + _, _ = k, v // to pass ineffassign return nil }); err != nil { @@ -423,6 +424,55 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { }); err != nil { t.Fatal(err) } + + // Check more than an overflow's worth of pages are freed. + stats := db.Stats() + freePages := stats.FreePageN + stats.PendingPageN + if freePages <= 0xFFFF { + t.Fatalf("expected more than 0xFFFF free pages, got %v", freePages) + } + + // Free page count should be preserved on reopen. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + db.MustReopen() + if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages { + t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats()) + } +} + +// Ensure that deleting of non-existing key is a no-op. 
+func TestBucket_Delete_NonExisting(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err = b.CreateBucket([]byte("nested")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + if b.Bucket([]byte("nested")) == nil { + t.Fatal("nested bucket has been deleted") + } + return nil + }); err != nil { + t.Fatal(err) + } } // Ensure that accessing and updating nested buckets is ok across transactions. @@ -782,6 +832,48 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { } } +// Ensure bucket can set and update its sequence number. +func TestBucket_Sequence(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + bkt, err := tx.CreateBucket([]byte("0")) + if err != nil { + t.Fatal(err) + } + + // Retrieve sequence. + if v := bkt.Sequence(); v != 0 { + t.Fatalf("unexpected sequence: %d", v) + } + + // Update sequence. + if err := bkt.SetSequence(1000); err != nil { + t.Fatal(err) + } + + // Read sequence again. + if v := bkt.Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify sequence in separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + // Ensure that a bucket can return an autoincrementing sequence. func TestBucket_NextSequence(t *testing.T) { db := MustOpenDB() @@ -1156,7 +1248,7 @@ func TestBucket_Stats(t *testing.T) { } // Only check allocations for 4KB pages. 
- if os.Getpagesize() == 4096 { + if db.Info().PageSize == 4096 { if stats.BranchAlloc != 4096 { t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) } else if stats.LeafAlloc != 36864 { @@ -1289,7 +1381,7 @@ func TestBucket_Stats_Small(t *testing.T) { t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - if os.Getpagesize() == 4096 { + if db.Info().PageSize == 4096 { if stats.BranchAlloc != 0 { t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) } else if stats.LeafAlloc != 0 { @@ -1348,7 +1440,7 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) { t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - if os.Getpagesize() == 4096 { + if db.Info().PageSize == 4096 { if stats.BranchAlloc != 0 { t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) } else if stats.LeafAlloc != 0 { @@ -1450,7 +1542,7 @@ func TestBucket_Stats_Nested(t *testing.T) { t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - if os.Getpagesize() == 4096 { + if db.Info().PageSize == 4096 { if stats.BranchAlloc != 0 { t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) } else if stats.LeafAlloc != 8192 { @@ -1523,7 +1615,7 @@ func TestBucket_Stats_Large(t *testing.T) { t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - if os.Getpagesize() == 4096 { + if db.Info().PageSize == 4096 { if stats.BranchAlloc != 53248 { t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) } else if stats.LeafAlloc != 4898816 { @@ -1598,7 +1690,7 @@ func TestBucket_Put_Single(t *testing.T) { index++ return true - }, nil); err != nil { + }, qconfig()); err != nil { t.Error(err) } } diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main.go b/vendor/github.com/coreos/bbolt/cmd/bolt/main.go similarity index 70% rename from vendor/github.com/boltdb/bolt/cmd/bolt/main.go rename to vendor/github.com/coreos/bbolt/cmd/bolt/main.go index b96e6f73511f..eb85e05c9d81 100644 --- a/vendor/github.com/boltdb/bolt/cmd/bolt/main.go +++ b/vendor/github.com/coreos/bbolt/cmd/bolt/main.go @@ -19,7 +19,7 @@ import ( "unicode/utf8" "unsafe" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) var ( @@ -49,11 +49,17 @@ var ( // ErrPageIDRequired is returned when a required page id is not specified. ErrPageIDRequired = errors.New("page id required") - // ErrPageNotFound is returned when specifying a page above the high water mark. - ErrPageNotFound = errors.New("page not found") + // ErrBucketRequired is returned when a bucket is not specified. + ErrBucketRequired = errors.New("bucket required") - // ErrPageFreed is returned when reading a page that has already been freed. - ErrPageFreed = errors.New("page freed") + // ErrBucketNotFound is returned when a bucket is not found. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrKeyRequired is returned when a key is not specified. + ErrKeyRequired = errors.New("key required") + + // ErrKeyNotFound is returned when a key is not found. + ErrKeyNotFound = errors.New("key not found") ) // PageHeaderSize represents the size of the bolt.page header. @@ -100,12 +106,22 @@ func (m *Main) Run(args ...string) error { return ErrUsage case "bench": return newBenchCommand(m).Run(args[1:]...) + case "buckets": + return newBucketsCommand(m).Run(args[1:]...) case "check": return newCheckCommand(m).Run(args[1:]...) + case "compact": + return newCompactCommand(m).Run(args[1:]...) case "dump": return newDumpCommand(m).Run(args[1:]...) + case "page-item": + return newPageItemCommand(m).Run(args[1:]...) + case "get": + return newGetCommand(m).Run(args[1:]...) 
case "info": return newInfoCommand(m).Run(args[1:]...) + case "keys": + return newKeysCommand(m).Run(args[1:]...) case "page": return newPageCommand(m).Run(args[1:]...) case "pages": @@ -129,10 +145,17 @@ Usage: The commands are: bench run synthetic benchmark against bolt + buckets print a list of buckets check verifies integrity of bolt database + compact copies a bolt database, compacting it in the process + dump print a hexadecimal dump of a single page + get print the value of a key in a bucket info print basic info + keys print a list of keys in a bucket help print this screen + page print one or more pages in human readable format pages print list of pages with their types + page-item print the key and value of a page item. stats iterate over all pages and generate usage stats Use "bolt [command] -h" for more information about a command. @@ -185,17 +208,9 @@ func (cmd *CheckCommand) Run(args ...string) error { // Perform consistency check. return db.View(func(tx *bolt.Tx) error { var count int - ch := tx.Check() - loop: - for { - select { - case err, ok := <-ch: - if !ok { - break loop - } - fmt.Fprintln(cmd.Stdout, err) - count++ - } + for err := range tx.Check() { + fmt.Fprintln(cmd.Stdout, err) + count++ } // Print summary of errors. @@ -356,7 +371,7 @@ func (cmd *DumpCommand) Run(args ...string) error { return nil } -// PrintPage prints a given page as hexidecimal. +// PrintPage prints a given page as hexadecimal. func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { const bytesPerLineN = 16 @@ -404,9 +419,173 @@ func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSi // Usage returns the help message. func (cmd *DumpCommand) Usage() string { return strings.TrimLeft(` -usage: bolt dump -page PAGEID PATH +usage: bolt dump PATH pageid [pageid...] + +Dump prints a hexadecimal dump of one or more pages. +`, "\n") +} + +// PageItemCommand represents the "page-item" command execution. +type PageItemCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newPageItemCommand returns a PageItemCommand. +func newPageItemCommand(m *Main) *PageItemCommand { + return &PageItemCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +type pageItemOptions struct { + help bool + keyOnly bool + valueOnly bool + format string +} + +// Run executes the command. +func (cmd *PageItemCommand) Run(args ...string) error { + // Parse flags. + options := &pageItemOptions{} + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.BoolVar(&options.keyOnly, "key-only", false, "Print only the key") + fs.BoolVar(&options.valueOnly, "value-only", false, "Print only the value") + fs.StringVar(&options.format, "format", "ascii-encoded", "Output format. One of: ascii-encoded|hex|bytes") + fs.BoolVar(&options.help, "h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if options.help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + if options.keyOnly && options.valueOnly { + return fmt.Errorf("The --key-only or --value-only flag may be set, but not both.") + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page id. + pageID, err := strconv.Atoi(fs.Arg(1)) + if err != nil { + return err + } + + // Read item id. 
+	itemID, err := strconv.Atoi(fs.Arg(2))
+	if err != nil {
+		return err
+	}
+
+	// Open database file handler.
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+
+	// Retrieve page info and page size.
+	_, buf, err := ReadPage(path, pageID)
+	if err != nil {
+		return err
+	}
+
+	if !options.valueOnly {
+		err := cmd.PrintLeafItemKey(cmd.Stdout, buf, uint16(itemID), options.format)
+		if err != nil {
+			return err
+		}
+	}
+	if !options.keyOnly {
+		err := cmd.PrintLeafItemValue(cmd.Stdout, buf, uint16(itemID), options.format)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// leafPageElement retrieves a leaf page element.
+func (cmd *PageItemCommand) leafPageElement(pageBytes []byte, index uint16) (*leafPageElement, error) {
+	p := (*page)(unsafe.Pointer(&pageBytes[0]))
+	if index >= p.count {
+		return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d.", p.count, index)
+	}
+	if p.Type() != "leaf" {
+		return nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Type())
+	}
+	return p.leafPageElement(index), nil
+}
+
+// writeBytes writes the bytes to the writer. Supported formats: ascii-encoded, hex, bytes.
+func (cmd *PageItemCommand) writeBytes(w io.Writer, b []byte, format string) error {
+	switch format {
+	case "ascii-encoded":
+		_, err := fmt.Fprintf(w, "%q", b)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(w, "\n")
+		return err
+	case "hex":
+		_, err := fmt.Fprintf(w, "%x", b)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(w, "\n")
+		return err
+	case "bytes":
+		_, err := w.Write(b)
+		return err
+	default:
+		return fmt.Errorf("writeBytes: unsupported format: %s", format)
+	}
+}
+
+// PrintLeafItemKey writes the bytes of a leaf element's key.
+func (cmd *PageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error {
+	e, err := cmd.leafPageElement(pageBytes, index)
+	if err != nil {
+		return err
+	}
+	return cmd.writeBytes(w, e.key(), format)
+}
+
+// PrintLeafItemValue writes the bytes of a leaf element's value.
+func (cmd *PageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error {
+	e, err := cmd.leafPageElement(pageBytes, index)
+	if err != nil {
+		return err
+	}
+	return cmd.writeBytes(w, e.value(), format)
+}
+
+// Usage returns the help message.
+func (cmd *PageItemCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt page-item [options] PATH pageid itemid
+
+Additional options include:
+
+	--key-only
+		Print only the key
+	--value-only
+		Print only the value
+	--format
+		Output format. One of: ascii-encoded|hex|bytes (default=ascii-encoded)
 
-Dump prints a hexidecimal dump of a single page.
+page-item prints a page item key and value.
 `, "\n")
 }
 
@@ -539,9 +718,9 @@ func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
 			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
 			v = fmt.Sprintf("", b.root, b.sequence)
 		} else if isPrintable(string(e.value())) {
-			v = fmt.Sprintf("%q", string(e.value()))
+			v = fmt.Sprintf("%q", string(e.value()))
 		} else {
-			k = fmt.Sprintf("%x", string(e.value()))
+			v = fmt.Sprintf("%x", string(e.value()))
 		}
 
 		fmt.Fprintf(w, "%s: %s\n", k, v)
@@ -580,20 +759,29 @@ func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
 func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
 	p := (*page)(unsafe.Pointer(&buf[0]))
 
+	// Check for overflow and, if present, adjust starting index and actual element count.
+ idx, count := 0, int(p.count) + if p.count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.count) + fmt.Fprintf(w, "Item Count: %d\n", count) + fmt.Fprintf(w, "Overflow: %d\n", p.overflow) + fmt.Fprintf(w, "\n") // Print each page in the freelist. ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)) - for i := uint16(0); i < p.count; i++ { + for i := int(idx); i < count; i++ { fmt.Fprintf(w, "%d\n", ids[i]) } fmt.Fprintf(w, "\n") return nil } -// PrintPage prints a given page as hexidecimal. +// PrintPage prints a given page as hexadecimal. func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { const bytesPerLineN = 16 @@ -641,7 +829,7 @@ func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSi // Usage returns the help message. func (cmd *PageCommand) Usage() string { return strings.TrimLeft(` -usage: bolt page -page PATH pageid [pageid...] +usage: bolt page PATH pageid [pageid...] Page prints one or more pages in human readable format. `, "\n") @@ -876,6 +1064,212 @@ experience corruption, please submit a ticket to the Bolt project page: `, "\n") } +// BucketsCommand represents the "buckets" command execution. +type BucketsCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewBucketsCommand returns a BucketsCommand. +func newBucketsCommand(m *Main) *BucketsCommand { + return &BucketsCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *BucketsCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print buckets. + return db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + fmt.Fprintln(cmd.Stdout, string(name)) + return nil + }) + }) +} + +// Usage returns the help message. +func (cmd *BucketsCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt buckets PATH + +Print a list of buckets. +`, "\n") +} + +// KeysCommand represents the "keys" command execution. +type KeysCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewKeysCommand returns a KeysCommand. +func newKeysCommand(m *Main) *KeysCommand { + return &KeysCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *KeysCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and bucket. + path, bucket := fs.Arg(0), fs.Arg(1) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } else if bucket == "" { + return ErrBucketRequired + } + + // Open database. 
+ db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print keys. + return db.View(func(tx *bolt.Tx) error { + // Find bucket. + b := tx.Bucket([]byte(bucket)) + if b == nil { + return ErrBucketNotFound + } + + // Iterate over each key. + return b.ForEach(func(key, _ []byte) error { + fmt.Fprintln(cmd.Stdout, string(key)) + return nil + }) + }) +} + +// Usage returns the help message. +func (cmd *KeysCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt keys PATH BUCKET + +Print a list of keys in the given bucket. +`, "\n") +} + +// GetCommand represents the "get" command execution. +type GetCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewGetCommand returns a GetCommand. +func newGetCommand(m *Main) *GetCommand { + return &GetCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *GetCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path, bucket and key. + path, bucket, key := fs.Arg(0), fs.Arg(1), fs.Arg(2) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } else if bucket == "" { + return ErrBucketRequired + } else if key == "" { + return ErrKeyRequired + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print value. + return db.View(func(tx *bolt.Tx) error { + // Find bucket. + b := tx.Bucket([]byte(bucket)) + if b == nil { + return ErrBucketNotFound + } + + // Find value for given key. + val := b.Get([]byte(key)) + if val == nil { + return ErrKeyNotFound + } + + fmt.Fprintln(cmd.Stdout, string(val)) + return nil + }) +} + +// Usage returns the help message. +func (cmd *GetCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt get PATH BUCKET KEY + +Print the value of the given key in the given bucket. +`, "\n") +} + var benchBucketName = []byte("bench") // BenchCommand represents the "bench" command execution. 
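To make the behaviour of the new read-only subcommands easy to see at a glance, the standalone sketch below strings together the same bbolt calls they wrap: tx.ForEach for "buckets", Bucket.ForEach for "keys", and Bucket.Get for "get". It is illustrative only; the path "my.db" and the bucket/key names "widgets"/"foo" are placeholders and are not defined anywhere in this patch.

package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Open the database the same way the buckets/keys/get commands do.
	db, err := bolt.Open("my.db", 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.View(func(tx *bolt.Tx) error {
		// "buckets": list every top-level bucket.
		if err := tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
			fmt.Println(string(name))
			return nil
		}); err != nil {
			return err
		}
		// "keys" and "get": walk one bucket and read a single value.
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return fmt.Errorf("bucket not found")
		}
		if err := b.ForEach(func(k, _ []byte) error {
			fmt.Println(string(k))
			return nil
		}); err != nil {
			return err
		}
		fmt.Printf("%s\n", b.Get([]byte("foo")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}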
@@ -1028,12 +1422,12 @@ func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, res func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) + return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i }) } func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) + return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() }) } func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { @@ -1180,12 +1574,14 @@ func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt var count int var top = tx.Bucket(benchBucketName) if err := top.ForEach(func(name, _ []byte) error { - c := top.Bucket(name).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue + if b := top.Bucket(name); b != nil { + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return ErrInvalidValue + } + count++ } - count++ } return nil }); err != nil { @@ -1530,3 +1926,211 @@ func (n *leafPageElement) value() []byte { buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] } + +// CompactCommand represents the "compact" command execution. +type CompactCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + SrcPath string + DstPath string + TxMaxSize int64 +} + +// newCompactCommand returns a CompactCommand. +func newCompactCommand(m *Main) *CompactCommand { + return &CompactCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CompactCommand) Run(args ...string) (err error) { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.SetOutput(ioutil.Discard) + fs.StringVar(&cmd.DstPath, "o", "", "") + fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") + if err := fs.Parse(args); err == flag.ErrHelp { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } else if err != nil { + return err + } else if cmd.DstPath == "" { + return fmt.Errorf("output file required") + } + + // Require database paths. + cmd.SrcPath = fs.Arg(0) + if cmd.SrcPath == "" { + return ErrPathRequired + } + + // Ensure source file exists. + fi, err := os.Stat(cmd.SrcPath) + if os.IsNotExist(err) { + return ErrFileNotFound + } else if err != nil { + return err + } + initialSize := fi.Size() + + // Open source database. + src, err := bolt.Open(cmd.SrcPath, 0444, nil) + if err != nil { + return err + } + defer src.Close() + + // Open destination database. + dst, err := bolt.Open(cmd.DstPath, fi.Mode(), nil) + if err != nil { + return err + } + defer dst.Close() + + // Run compaction. + if err := cmd.compact(dst, src); err != nil { + return err + } + + // Report stats on new size. 
+ fi, err = os.Stat(cmd.DstPath) + if err != nil { + return err + } else if fi.Size() == 0 { + return fmt.Errorf("zero db size") + } + fmt.Fprintf(cmd.Stdout, "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) + + return nil +} + +func (cmd *CompactCommand) compact(dst, src *bolt.DB) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // Fill the entire page for best compaction. + b.FillPercent = 1.0 + + // If there is no value then this is a bucket call. + if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func (cmd *CompactCommand) walk(db *bolt.DB, walkFn walkFunc) error { + return db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return cmd.walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func (cmd *CompactCommand) walkBucket(b *bolt.Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return cmd.walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return cmd.walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} + +// Usage returns the help message. +func (cmd *CompactCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt compact [options] -o DST SRC + +Compact opens a database at SRC path and walks it recursively, copying keys +as they are found from all buckets, to a newly created database at DST path. + +The original database is left untouched. + +Additional options include: + + -tx-max-size NUM + Specifies the maximum size of individual transactions. + Defaults to 64KB. 
+`, "\n") +} diff --git a/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go b/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go new file mode 100644 index 000000000000..16bf804ae319 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go @@ -0,0 +1,456 @@ +package main_test + +import ( + "bytes" + crypto "crypto/rand" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "github.com/coreos/bbolt" + "github.com/coreos/bbolt/cmd/bolt" +) + +// Ensure the "info" command can print information about a database. +func TestInfoCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + db.DB.Close() + defer db.Close() + + // Run the info command. + m := NewMain() + if err := m.Run("info", db.Path); err != nil { + t.Fatal(err) + } +} + +// Ensure the "stats" command executes correctly with an empty database. +func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + db.DB.Close() + + // Generate expected result. + exp := "Aggregate statistics for 0 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 0\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 0\n" + + "\tNumber of levels in B+tree: 0\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 0\n" + + "\tBytes actually used for leaf data: 0 (0%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 0\n" + + "\tTotal number on inlined buckets: 0 (0%)\n" + + "\tBytes used for inlined buckets: 0 (0%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Ensure the "stats" command can execute correctly. +func TestStatsCommand_Run(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create "foo" bucket. + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + return err + } + for i := 0; i < 10; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "bar" bucket. + b, err = tx.CreateBucket([]byte("bar")) + if err != nil { + return err + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "baz" bucket. + b, err = tx.CreateBucket([]byte("baz")) + if err != nil { + return err + } + if err := b.Put([]byte("key"), []byte("value")); err != nil { + return err + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + // Generate expected result. 
+ exp := "Aggregate statistics for 3 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 1\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 111\n" + + "\tNumber of levels in B+tree: 1\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 4096\n" + + "\tBytes actually used for leaf data: 1996 (48%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 3\n" + + "\tTotal number on inlined buckets: 2 (66%)\n" + + "\tBytes used for inlined buckets: 236 (11%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Ensure the "buckets" command can print a list of buckets. +func TestBucketsCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{"foo", "bar", "baz"} { + _, err := tx.CreateBucket([]byte(name)) + if err != nil { + return err + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + expected := "bar\nbaz\nfoo\n" + + // Run the command. + m := NewMain() + if err := m.Run("buckets", db.Path); err != nil { + t.Fatal(err) + } else if actual := m.Stdout.String(); actual != expected { + t.Fatalf("unexpected stdout:\n\n%s", actual) + } +} + +// Ensure the "keys" command can print a list of keys for a bucket. +func TestKeysCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{"foo", "bar"} { + b, err := tx.CreateBucket([]byte(name)) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + key := fmt.Sprintf("%s-%d", name, i) + if err := b.Put([]byte(key), []byte{0}); err != nil { + return err + } + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + expected := "foo-0\nfoo-1\nfoo-2\n" + + // Run the command. + m := NewMain() + if err := m.Run("keys", db.Path, "foo"); err != nil { + t.Fatal(err) + } else if actual := m.Stdout.String(); actual != expected { + t.Fatalf("unexpected stdout:\n\n%s", actual) + } +} + +// Ensure the "get" command can print the value of a key in a bucket. +func TestGetCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{"foo", "bar"} { + b, err := tx.CreateBucket([]byte(name)) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + key := fmt.Sprintf("%s-%d", name, i) + val := fmt.Sprintf("val-%s-%d", name, i) + if err := b.Put([]byte(key), []byte(val)); err != nil { + return err + } + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + expected := "val-foo-1\n" + + // Run the command. + m := NewMain() + if err := m.Run("get", db.Path, "foo", "foo-1"); err != nil { + t.Fatal(err) + } else if actual := m.Stdout.String(); actual != expected { + t.Fatalf("unexpected stdout:\n\n%s", actual) + } +} + +// Main represents a test wrapper for main.Main that records output. 
+type Main struct {
+	*main.Main
+	Stdin  bytes.Buffer
+	Stdout bytes.Buffer
+	Stderr bytes.Buffer
+}
+
+// NewMain returns a new instance of Main.
+func NewMain() *Main {
+	m := &Main{Main: main.NewMain()}
+	m.Main.Stdin = &m.Stdin
+	m.Main.Stdout = &m.Stdout
+	m.Main.Stderr = &m.Stderr
+	return m
+}
+
+// MustOpen creates a Bolt database in a temporary location.
+func MustOpen(mode os.FileMode, options *bolt.Options) *DB {
+	// Create temporary path.
+	f, _ := ioutil.TempFile("", "bolt-")
+	f.Close()
+	os.Remove(f.Name())
+
+	db, err := bolt.Open(f.Name(), mode, options)
+	if err != nil {
+		panic(err.Error())
+	}
+	return &DB{DB: db, Path: f.Name()}
+}
+
+// DB is a test wrapper for bolt.DB.
+type DB struct {
+	*bolt.DB
+	Path string
+}
+
+// Close closes and removes the database.
+func (db *DB) Close() error {
+	defer os.Remove(db.Path)
+	return db.DB.Close()
+}
+
+func TestCompactCommand_Run(t *testing.T) {
+	var s int64
+	if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil {
+		t.Fatal(err)
+	}
+	rand.Seed(s)
+
+	dstdb := MustOpen(0666, nil)
+	dstdb.Close()
+
+	// fill the db
+	db := MustOpen(0666, nil)
+	if err := db.Update(func(tx *bolt.Tx) error {
+		n := 2 + rand.Intn(5)
+		for i := 0; i < n; i++ {
+			k := []byte(fmt.Sprintf("b%d", i))
+			b, err := tx.CreateBucketIfNotExists(k)
+			if err != nil {
+				return err
+			}
+			if err := b.SetSequence(uint64(i)); err != nil {
+				return err
+			}
+			if err := fillBucket(b, append(k, '.')); err != nil {
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		db.Close()
+		t.Fatal(err)
+	}
+
+	// make the db grow by adding large values, and delete them.
+	if err := db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("large_vals"))
+		if err != nil {
+			return err
+		}
+		n := 5 + rand.Intn(5)
+		for i := 0; i < n; i++ {
+			v := make([]byte, 1000*1000*(1+rand.Intn(5)))
+			_, err := crypto.Read(v)
+			if err != nil {
+				return err
+			}
+			if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}); err != nil {
+		db.Close()
+		t.Fatal(err)
+	}
+	if err := db.Update(func(tx *bolt.Tx) error {
+		c := tx.Bucket([]byte("large_vals")).Cursor()
+		for k, _ := c.First(); k != nil; k, _ = c.Next() {
+			if err := c.Delete(); err != nil {
+				return err
+			}
+		}
+		return tx.DeleteBucket([]byte("large_vals"))
+	}); err != nil {
+		db.Close()
+		t.Fatal(err)
+	}
+	db.DB.Close()
+	defer db.Close()
+	defer dstdb.Close()
+
+	dbChk, err := chkdb(db.Path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	m := NewMain()
+	if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil {
+		t.Fatal(err)
+	}
+
+	dbChkAfterCompact, err := chkdb(db.Path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dstdbChk, err := chkdb(dstdb.Path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(dbChk, dbChkAfterCompact) {
+		t.Error("the original db has been touched")
+	}
+	if !bytes.Equal(dbChk, dstdbChk) {
+		t.Error("the compacted db data isn't the same as the original db")
+	}
+}
+
+func fillBucket(b *bolt.Bucket, prefix []byte) error {
+	n := 10 + rand.Intn(50)
+	for i := 0; i < n; i++ {
+		v := make([]byte, 10*(1+rand.Intn(4)))
+		_, err := crypto.Read(v)
+		if err != nil {
+			return err
+		}
+		k := append(prefix, []byte(fmt.Sprintf("k%d", i))...)
+		if err := b.Put(k, v); err != nil {
+			return err
+		}
+	}
+	// limit depth of subbuckets
+	s := 2 + rand.Intn(4)
+	if len(prefix) > (2*s + 1) {
+		return nil
+	}
+	n = 1 + rand.Intn(3)
+	for i := 0; i < n; i++ {
+		k := append(prefix, []byte(fmt.Sprintf("b%d", i))...)
+ sb, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := fillBucket(sb, append(k, '.')); err != nil { + return err + } + } + return nil +} + +func chkdb(path string) ([]byte, error) { + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return nil, err + } + defer db.Close() + var buf bytes.Buffer + err = db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return walkBucket(b, name, nil, &buf) + }) + }) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { + return err + } + + // not a bucket, exit. + if v != nil { + return nil + } + return parent.ForEach(func(k, v []byte) error { + if v == nil { + return walkBucket(parent.Bucket(k), k, nil, w) + } + return walkBucket(parent, k, v, w) + }) +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go similarity index 100% rename from vendor/github.com/boltdb/bolt/cursor.go rename to vendor/github.com/coreos/bbolt/cursor.go diff --git a/vendor/github.com/boltdb/bolt/cursor_test.go b/vendor/github.com/coreos/bbolt/cursor_test.go similarity index 99% rename from vendor/github.com/boltdb/bolt/cursor_test.go rename to vendor/github.com/coreos/bbolt/cursor_test.go index 562d60f9af3b..7b1ae1983cef 100644 --- a/vendor/github.com/boltdb/bolt/cursor_test.go +++ b/vendor/github.com/coreos/bbolt/cursor_test.go @@ -11,7 +11,7 @@ import ( "testing" "testing/quick" - "github.com/boltdb/bolt" + "github.com/coreos/bbolt" ) // Ensure that a cursor can return a reference to the bucket that created it. diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/coreos/bbolt/db.go similarity index 85% rename from vendor/github.com/boltdb/bolt/db.go rename to vendor/github.com/coreos/bbolt/db.go index 1223493ca7be..08dfd0ab17d6 100644 --- a/vendor/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/coreos/bbolt/db.go @@ -7,8 +7,7 @@ import ( "log" "os" "runtime" - "runtime/debug" - "strings" + "sort" "sync" "time" "unsafe" @@ -23,6 +22,8 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED +const pgidNoFreelist pgid = 0xffffffffffffffff + // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -39,6 +40,9 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -61,6 +65,11 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. 
// Skipping truncation avoids preallocation of hard drive space and @@ -107,9 +116,11 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - freelist *freelist stats Stats + freelist *freelist + freelistLoad sync.Once + pagePool sync.Pool batchMu sync.Mutex @@ -148,14 +159,17 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - + db := &DB{ + opened: true, + } // Set default options if no options are provided. if options == nil { options = DefaultOptions } + db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + db.lockfile = nil // make 'unused' happy. TODO: rework locks _ = db.close() return nil, err } @@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, err @@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // Read the first meta page to determine the page size. var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { db.pageSize = int(m.pageSize) } + } else { + return nil, ErrInvalid } } @@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. 
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } // Mark the database as opened and return. return db, nil } +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t + db.freePages() + return t, nil +} - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid } if minid > 0 { db.freelist.release(minid - 1) } - - return t, nil + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. } +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] break } } @@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - if err := t.Rollback(); err != nil { - return err - } - - return nil + return t.Rollback() } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -823,7 +891,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { +func (db *DB) allocate(txid txid, count int) (*page, error) { // Allocate a temporary buffer for the page. 
var buf []byte if count == 1 { @@ -835,7 +903,7 @@ func (db *DB) allocate(count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { + if p.id = db.freelist.allocate(txid, count); p.id != 0 { return p, nil } @@ -890,6 +958,38 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -900,6 +1000,10 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -916,6 +1020,14 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. + NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -952,15 +1064,11 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN + diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - type Info struct { Data uintptr PageSize int @@ -999,7 +1107,8 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1026,11 +1135,3 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/db_test.go b/vendor/github.com/coreos/bbolt/db_test.go similarity index 85% rename from vendor/github.com/boltdb/bolt/db_test.go rename to vendor/github.com/coreos/bbolt/db_test.go index 74ff93a9f3d0..e3a58c3cabe6 100644 --- a/vendor/github.com/boltdb/bolt/db_test.go +++ b/vendor/github.com/coreos/bbolt/db_test.go @@ -9,28 +9,20 @@ import ( "hash/fnv" "io/ioutil" "log" + "math/rand" "os" "path/filepath" "regexp" - "runtime" - "sort" - "strings" "sync" "testing" "time" "unsafe" - "github.com/boltdb/bolt" + "github.com/coreos/bbolt" ) var statsFlag = flag.Bool("stats", false, "show performance stats") -// version is the data file format version. -const version = 2 - -// magic is the marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - // pageSize is the size of one page in the data file. const pageSize = 4096 @@ -53,6 +45,8 @@ type meta struct { // Ensure that a database can be opened without error. func TestOpen(t *testing.T) { path := tempfile() + defer os.RemoveAll(path) + db, err := bolt.Open(path, 0666, nil) if err != nil { t.Fatal(err) @@ -88,6 +82,7 @@ func TestOpen_ErrNotExists(t *testing.T) { // Ensure that opening a file that is not a Bolt database returns ErrInvalid. func TestOpen_ErrInvalid(t *testing.T) { path := tempfile() + defer os.RemoveAll(path) f, err := os.Create(path) if err != nil { @@ -99,7 +94,6 @@ func TestOpen_ErrInvalid(t *testing.T) { if err := f.Close(); err != nil { t.Fatal(err) } - defer os.Remove(path) if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { t.Fatalf("unexpected error: %s", err) @@ -180,69 +174,6 @@ func TestOpen_ErrChecksum(t *testing.T) { } } -// Ensure that opening an already open database file will timeout. -func TestOpen_Timeout(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - path := tempfile() - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db0 == nil { - t.Fatal("expected database") - } - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - if err != bolt.ErrTimeout { - t.Fatalf("unexpected timeout: %s", err) - } else if db1 != nil { - t.Fatal("unexpected database") - } else if time.Since(start) <= 100*time.Millisecond { - t.Fatal("expected to wait at least timeout duration") - } - - if err := db0.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening an already open database file will wait until its closed. -func TestOpen_Wait(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - path := tempfile() - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - - // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { _ = db0.Close() }) - - // Attempt to open the database again. 
- start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - if err != nil { - t.Fatal(err) - } else if time.Since(start) <= 100*time.Millisecond { - t.Fatal("expected to wait at least timeout duration") - } - - if err := db1.Close(); err != nil { - t.Fatal(err) - } -} - // Ensure that opening a database does not increase its size. // https://github.com/boltdb/bolt/issues/291 func TestOpen_Size(t *testing.T) { @@ -374,15 +305,16 @@ func TestOpen_Size_Large(t *testing.T) { // Ensure that a re-opened database is consistent. func TestOpen_Check(t *testing.T) { path := tempfile() + defer os.RemoveAll(path) db, err := bolt.Open(path, 0666, nil) if err != nil { t.Fatal(err) } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + if err = db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { t.Fatal(err) } - if err := db.Close(); err != nil { + if err = db.Close(); err != nil { t.Fatal(err) } @@ -406,17 +338,19 @@ func TestOpen_MetaInitWriteError(t *testing.T) { // Ensure that a database that is too small returns an error. func TestOpen_FileTooSmall(t *testing.T) { path := tempfile() + defer os.RemoveAll(path) db, err := bolt.Open(path, 0666, nil) if err != nil { t.Fatal(err) } - if err := db.Close(); err != nil { + pageSize := int64(db.Info().PageSize) + if err = db.Close(); err != nil { t.Fatal(err) } // corrupt the database - if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { + if err = os.Truncate(path, pageSize); err != nil { t.Fatal(err) } @@ -426,103 +360,6 @@ func TestOpen_FileTooSmall(t *testing.T) { } } -// Ensure that a database can be opened in read-only mode by multiple processes -// and that a database can not be opened in read-write mode and in read-only -// mode at the same time. -func TestOpen_ReadOnly(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) - - path := tempfile() - - // Open in read-write mode. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db.IsReadOnly() { - t.Fatal("db should not be in read only mode") - } - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(bucket) - if err != nil { - return err - } - if err := b.Put(key, value); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - // Open in read-only mode. - db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - if err != nil { - t.Fatal(err) - } - - // Opening in read-write mode should return an error. - if _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}); err == nil { - t.Fatal("expected error") - } - - // And again (in read-only mode). - db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - if err != nil { - t.Fatal(err) - } - - // Verify both read-only databases are accessible. - for _, db := range []*bolt.DB{db0, db1} { - // Verify is is in read only mode indeed. - if !db.IsReadOnly() { - t.Fatal("expected read only mode") - } - - // Read-only databases should not allow updates. - if err := db.Update(func(*bolt.Tx) error { - panic(`should never get here`) - }); err != bolt.ErrDatabaseReadOnly { - t.Fatalf("unexpected error: %s", err) - } - - // Read-only databases should not allow beginning writable txns. 
- if _, err := db.Begin(true); err != bolt.ErrDatabaseReadOnly { - t.Fatalf("unexpected error: %s", err) - } - - // Verify the data. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) - if b == nil { - return fmt.Errorf("expected bucket `%s`", string(bucket)) - } - - got := string(b.Get(key)) - expected := string(value) - if got != expected { - return fmt.Errorf("expected `%s`, got `%s`", expected, got) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - if err := db0.Close(); err != nil { - t.Fatal(err) - } - if err := db1.Close(); err != nil { - t.Fatal(err) - } -} - // TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough // to hold data from concurrent write transaction resolves the issue that // read transaction blocks the write transaction and causes deadlock. @@ -531,7 +368,7 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { path := tempfile() defer os.Remove(path) - initMmapSize := 1 << 31 // 2GB + initMmapSize := 1 << 30 // 1GB testWriteSize := 1 << 27 // 134MB db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) @@ -583,6 +420,144 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { } } +// TestDB_Open_ReadOnly checks a database in read only mode can read but not write. +func TestDB_Open_ReadOnly(t *testing.T) { + // Create a writable db, write k-v and close it. + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + f := db.f + o := &bolt.Options{ReadOnly: true} + readOnlyDB, err := bolt.Open(f, 0666, o) + if err != nil { + panic(err) + } + + if !readOnlyDB.IsReadOnly() { + t.Fatal("expect db in read only mode") + } + + // Read from a read-only transaction. + if err := readOnlyDB.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal(value, []byte("bar")) { + t.Fatal("expect value 'bar', got", value) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Can't launch read-write transaction. + if _, err := readOnlyDB.Begin(true); err != bolt.ErrDatabaseReadOnly { + t.Fatalf("unexpected error: %s", err) + } + + if err := readOnlyDB.Close(); err != nil { + t.Fatal(err) + } +} + +// TestOpen_BigPage checks the database uses bigger pages when +// changing PageSize. +func TestOpen_BigPage(t *testing.T) { + pageSize := os.Getpagesize() + + db1 := MustOpenWithOption(&bolt.Options{PageSize: pageSize * 2}) + defer db1.MustClose() + + db2 := MustOpenWithOption(&bolt.Options{PageSize: pageSize * 4}) + defer db2.MustClose() + + if db1sz, db2sz := fileSize(db1.f), fileSize(db2.f); db1sz >= db2sz { + t.Errorf("expected %d < %d", db1sz, db2sz) + } +} + +// TestOpen_RecoverFreeList tests opening the DB with free-list +// write-out after no free list sync will recover the free list +// and write it out. +func TestOpen_RecoverFreeList(t *testing.T) { + db := MustOpenWithOption(&bolt.Options{NoFreelistSync: true}) + defer db.MustClose() + + // Write some pages. 
+ tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + wbuf := make([]byte, 8192) + for i := 0; i < 100; i++ { + s := fmt.Sprintf("%d", i) + b, err := tx.CreateBucket([]byte(s)) + if err != nil { + t.Fatal(err) + } + if err = b.Put([]byte(s), wbuf); err != nil { + t.Fatal(err) + } + } + if err = tx.Commit(); err != nil { + t.Fatal(err) + } + + // Generate free pages. + if tx, err = db.Begin(true); err != nil { + t.Fatal(err) + } + for i := 0; i < 50; i++ { + s := fmt.Sprintf("%d", i) + b := tx.Bucket([]byte(s)) + if b == nil { + t.Fatal(err) + } + if err := b.Delete([]byte(s)); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Record freelist count from opening with NoFreelistSync. + db.MustReopen() + freepages := db.Stats().FreePageN + if freepages == 0 { + t.Fatalf("no free pages on NoFreelistSync reopen") + } + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Check free page count is reconstructed when opened with freelist sync. + db.o = &bolt.Options{} + db.MustReopen() + // One less free page for syncing the free list on open. + freepages-- + if fp := db.Stats().FreePageN; fp < freepages { + t.Fatalf("closed with %d free pages, opened with %d", freepages, fp) + } +} + // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { var db bolt.DB @@ -614,6 +589,65 @@ func TestDB_BeginRW(t *testing.T) { } } +// TestDB_Concurrent_WriteTo checks that issuing WriteTo operations concurrently +// with commits does not produce corrupted db files. +func TestDB_Concurrent_WriteTo(t *testing.T) { + o := &bolt.Options{NoFreelistSync: false} + db := MustOpenWithOption(o) + defer db.MustClose() + + var wg sync.WaitGroup + wtxs, rtxs := 5, 5 + wg.Add(wtxs * rtxs) + f := func(tx *bolt.Tx) { + defer wg.Done() + f, err := ioutil.TempFile("", "bolt-") + if err != nil { + panic(err) + } + time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond) + tx.WriteTo(f) + tx.Rollback() + f.Close() + snap := &DB{nil, f.Name(), o} + snap.MustReopen() + defer snap.MustClose() + snap.MustCheck() + } + + tx1, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if _, err := tx1.CreateBucket([]byte("abc")); err != nil { + t.Fatal(err) + } + if err := tx1.Commit(); err != nil { + t.Fatal(err) + } + + for i := 0; i < wtxs; i++ { + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("abc")).Put([]byte{0}, []byte{0}); err != nil { + t.Fatal(err) + } + for j := 0; j < rtxs; j++ { + rtx, rerr := db.Begin(false) + if rerr != nil { + t.Fatal(rerr) + } + go f(rtx) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + } + wg.Wait() +} + // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB @@ -1301,7 +1335,7 @@ func ExampleDB_Begin_ReadOnly() { defer os.Remove(db.Path()) // Create a bucket using a read-write transaction. 
- if err := db.Update(func(tx *bolt.Tx) error { + if err = db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }); err != nil { @@ -1314,16 +1348,16 @@ func ExampleDB_Begin_ReadOnly() { log.Fatal(err) } b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("john"), []byte("blue")); err != nil { + if err = b.Put([]byte("john"), []byte("blue")); err != nil { log.Fatal(err) } - if err := b.Put([]byte("abby"), []byte("red")); err != nil { + if err = b.Put([]byte("abby"), []byte("red")); err != nil { log.Fatal(err) } - if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { + if err = b.Put([]byte("zephyr"), []byte("purple")); err != nil { log.Fatal(err) } - if err := tx.Commit(); err != nil { + if err = tx.Commit(); err != nil { log.Fatal(err) } @@ -1337,11 +1371,11 @@ func ExampleDB_Begin_ReadOnly() { fmt.Printf("%s likes %s\n", k, v) } - if err := tx.Rollback(); err != nil { + if err = tx.Rollback(); err != nil { log.Fatal(err) } - if err := db.Close(); err != nil { + if err = db.Close(); err != nil { log.Fatal(err) } @@ -1527,15 +1561,27 @@ func validateBatchBench(b *testing.B, db *DB) { // DB is a test wrapper for bolt.DB. type DB struct { *bolt.DB + f string + o *bolt.Options } // MustOpenDB returns a new, open DB at a temporary location. func MustOpenDB() *DB { - db, err := bolt.Open(tempfile(), 0666, nil) + return MustOpenWithOption(nil) +} + +// MustOpenDBWithOption returns a new, open DB at a temporary location with given options. +func MustOpenWithOption(o *bolt.Options) *DB { + f := tempfile() + db, err := bolt.Open(f, 0666, o) if err != nil { panic(err) } - return &DB{db} + return &DB{ + DB: db, + f: f, + o: o, + } } // Close closes the database and deletes the underlying file. @@ -1560,6 +1606,15 @@ func (db *DB) MustClose() { } } +// MustReopen reopen the database. Panic on error. +func (db *DB) MustReopen() { + indb, err := bolt.Open(db.f, 0666, db.o) + if err != nil { + panic(err) + } + db.DB = indb +} + // PrintStats prints the database stats func (db *DB) PrintStats() { var stats = db.Stats() @@ -1639,40 +1694,6 @@ func tempfile() string { return f.Name() } -// mustContainKeys checks that a bucket contains a given set of keys. -func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - if err := b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }); err != nil { - panic(err) - } - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k, _ := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k, _ := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - func trunc(b []byte, length int) []byte { if length < len(b) { return b[:length] @@ -1692,15 +1713,9 @@ func fileSize(path string) int64 { return fi.Size() } -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - // u64tob converts a uint64 into an 8-byte slice. func u64tob(v uint64) []byte { b := make([]byte, 8) binary.BigEndian.PutUint64(b, v) return b } - -// btou64 converts an 8-byte slice into an uint64. 
-func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/doc.go rename to vendor/github.com/coreos/bbolt/doc.go diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go similarity index 100% rename from vendor/github.com/boltdb/bolt/errors.go rename to vendor/github.com/coreos/bbolt/errors.go diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go similarity index 56% rename from vendor/github.com/boltdb/bolt/freelist.go rename to vendor/github.com/coreos/bbolt/freelist.go index 1b7ba91b2a51..266f15429453 100644 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ b/vendor/github.com/coreos/bbolt/freelist.go @@ -6,25 +6,40 @@ import ( "unsafe" ) +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. func newFreelist() *freelist { return &freelist{ - pending: make(map[txid][]pgid), + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -40,27 +55,26 @@ func (f *freelist) free_count() int { // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int - for _, list := range f.pending { - count += len(list) + for _, txp := range f.pending { + count += len(txp.ids) } return count } -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) } - sort.Sort(m) - return pgids(f.ids).merge(m) + mergepgids(dst, f.ids, m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. 
-func (f *freelist) allocate(n int) pgid { +func (f *freelist) allocate(txid txid, n int) pgid { if len(f.ids) == 0 { return 0 } @@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid { for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } - + f.allocs[initial] = txid return initial } @@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) { } // Free page and all its overflow pages. - var ids = f.pending[txid] + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } - // Add to the freelist and cache. - ids = append(ids, id) + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) f.cache[id] = true } - f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) - for tid, ids := range f.pending { + for tid, txp := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. - m = append(m, ids...) + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { delete(f.pending, tid) } } @@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) { // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) + txp := f.pending[txid] + if txp == nil { + return } - - // Remove pages from pending list. + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) } // freed returns whether a given page is in the free list. @@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. 
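The freelist rework above parks each freed page under the transaction that freed it, together with the transaction that allocated it, so releaseRange can later hand back only pages whose entire alloc/free window lies inside a given txid range. A minimal, self-contained Go sketch of that bookkeeping follows; the types and helpers are simplified stand-ins, not bbolt's actual implementation.

package main

import (
	"fmt"
	"sort"
)

type pgid uint64
type txid uint64

type txPending struct {
	ids     []pgid
	alloctx []txid // txid that allocated each corresponding id
}

type freelist struct {
	ids     []pgid              // pages free for reuse
	pending map[txid]*txPending // pages freed by transactions that may still be visible
}

// free parks page id, freed by transaction freedTx and allocated by allocTx.
func (f *freelist) free(freedTx, allocTx txid, id pgid) {
	txp := f.pending[freedTx]
	if txp == nil {
		txp = &txPending{}
		f.pending[freedTx] = txp
	}
	txp.ids = append(txp.ids, id)
	txp.alloctx = append(txp.alloctx, allocTx)
}

// releaseRange moves pending pages whose allocating txid also falls in
// [begin, end] onto the free list; pages allocated outside the range stay pending.
func (f *freelist) releaseRange(begin, end txid) {
	for tid, txp := range f.pending {
		if tid < begin || tid > end {
			continue
		}
		var keepIDs []pgid
		var keepTx []txid
		for i, id := range txp.ids {
			if at := txp.alloctx[i]; at >= begin && at <= end {
				f.ids = append(f.ids, id)
			} else {
				keepIDs, keepTx = append(keepIDs, id), append(keepTx, at)
			}
		}
		if len(keepIDs) == 0 {
			delete(f.pending, tid)
		} else {
			txp.ids, txp.alloctx = keepIDs, keepTx
		}
	}
	sort.Slice(f.ids, func(i, j int) bool { return f.ids[i] < f.ids[j] })
}

func main() {
	f := &freelist{pending: map[txid]*txPending{}}
	f.free(200, 100, 3) // page 3: allocated at tx 100, freed at tx 200
	f.free(201, 150, 7) // page 7: allocated at tx 150, freed at tx 201
	f.releaseRange(140, 300)
	fmt.Println(f.ids)          // [7] -- page 3 stays pending, its allocator (100) is outside the range
	fmt.Println(len(f.pending)) // 1  -- tx 200 still holds page 3
}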
func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. idx, count := 0, int(p.count) @@ -169,7 +248,7 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) @@ -181,27 +260,33 @@ func (f *freelist) read(p *page) { f.reindex() } +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) } return nil @@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) { // Build a cache of only pending pages. pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { pcache[pendingID] = true } } @@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) + f.cache = make(map[pgid]bool, len(f.ids)) for _, id := range f.ids { f.cache[id] = true } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { f.cache[pendingID] = true } } diff --git a/vendor/github.com/coreos/bbolt/freelist_test.go b/vendor/github.com/coreos/bbolt/freelist_test.go new file mode 100644 index 000000000000..24ed4cf15dd8 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/freelist_test.go @@ -0,0 +1,288 @@ +package bolt + +import ( + "math/rand" + "reflect" + "sort" + "testing" + "unsafe" +) + +// Ensure that a page is added to a transaction's freelist. +func TestFreelist_free(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12}) + if !reflect.DeepEqual([]pgid{12}, f.pending[100].ids) { + t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) + } +} + +// Ensure that a page and its overflow is added to a transaction's freelist. 
+func TestFreelist_free_overflow(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 3}) + if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100].ids) { + t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) + } +} + +// Ensure that a transaction's free pages can be released. +func TestFreelist_release(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 1}) + f.free(100, &page{id: 9}) + f.free(102, &page{id: 39}) + f.release(100) + f.release(101) + if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + f.release(102) + if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that releaseRange handles boundary conditions correctly +func TestFreelist_releaseRange(t *testing.T) { + type testRange struct { + begin, end txid + } + + type testPage struct { + id pgid + n int + allocTxn txid + freeTxn txid + } + + var releaseRangeTests = []struct { + title string + pagesIn []testPage + releaseRanges []testRange + wantFree []pgid + }{ + { + title: "Single pending in range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 300}}, + wantFree: []pgid{3}, + }, + { + title: "Single pending with minimum end range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 200}}, + wantFree: []pgid{3}, + }, + { + title: "Single pending outsize minimum end range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 199}}, + wantFree: nil, + }, + { + title: "Single pending with minimum begin range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{100, 300}}, + wantFree: []pgid{3}, + }, + { + title: "Single pending outside minimum begin range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{101, 300}}, + wantFree: nil, + }, + { + title: "Single pending in minimum range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, + releaseRanges: []testRange{{199, 200}}, + wantFree: []pgid{3}, + }, + { + title: "Single pending and read transaction at 199", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, + releaseRanges: []testRange{{100, 198}, {200, 300}}, + wantFree: nil, + }, + { + title: "Adjacent pending and read transactions at 199, 200", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, + }, + releaseRanges: []testRange{ + {100, 198}, + {200, 199}, // Simulate the ranges db.freePages might produce. 
+ {201, 300}, + }, + wantFree: nil, + }, + { + title: "Out of order ranges", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, + }, + releaseRanges: []testRange{ + {201, 199}, + {201, 200}, + {200, 200}, + }, + wantFree: nil, + }, + { + title: "Multiple pending, read transaction at 150", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 100, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 100, freeTxn: 125}, + {id: 5, n: 1, allocTxn: 125, freeTxn: 150}, + {id: 6, n: 1, allocTxn: 125, freeTxn: 175}, + {id: 7, n: 2, allocTxn: 150, freeTxn: 175}, + {id: 9, n: 2, allocTxn: 175, freeTxn: 200}, + }, + releaseRanges: []testRange{{50, 149}, {151, 300}}, + wantFree: []pgid{4, 9}, + }, + } + + for _, c := range releaseRangeTests { + f := newFreelist() + + for _, p := range c.pagesIn { + for i := uint64(0); i < uint64(p.n); i++ { + f.ids = append(f.ids, pgid(uint64(p.id)+i)) + } + } + for _, p := range c.pagesIn { + f.allocate(p.allocTxn, p.n) + } + + for _, p := range c.pagesIn { + f.free(p.freeTxn, &page{id: p.id}) + } + + for _, r := range c.releaseRanges { + f.releaseRange(r.begin, r.end) + } + + if exp := c.wantFree; !reflect.DeepEqual(exp, f.ids) { + t.Errorf("exp=%v; got=%v for %s", exp, f.ids, c.title) + } + } +} + +// Ensure that a freelist can find contiguous blocks of pages. +func TestFreelist_allocate(t *testing.T) { + f := newFreelist() + f.ids = []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + if id := int(f.allocate(1, 3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.allocate(1, 1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.allocate(1, 3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(1, 2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.allocate(1, 1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.allocate(1, 0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(1, 0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + if id := int(f.allocate(1, 1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.allocate(1, 1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.allocate(1, 1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can deserialize from a freelist page. +func TestFreelist_read(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = freelistPageFlag + page.count = 2 + + // Insert 2 page ids. + ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) + ids[0] = 23 + ids[1] = 50 + + // Deserialize page into a freelist. + f := newFreelist() + f.read(page) + + // Ensure that there are two page ids in the freelist. + if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can serialize into a freelist page. +func TestFreelist_write(t *testing.T) { + // Create a freelist and write it to a page. + var buf [4096]byte + f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid]*txPending)} + f.pending[100] = &txPending{ids: []pgid{28, 11}} + f.pending[101] = &txPending{ids: []pgid{3}} + p := (*page)(unsafe.Pointer(&buf[0])) + if err := f.write(p); err != nil { + t.Fatal(err) + } + + // Read the page back out. 
+ f2 := newFreelist() + f2.read(p) + + // Ensure that the freelist is correct. + // All pages should be present and in reverse order. + if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { + t.Fatalf("exp=%v; got=%v", exp, f2.ids) + } +} + +func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } +func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } +func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } +func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } + +func benchmark_FreelistRelease(b *testing.B, size int) { + ids := randomPgids(size) + pending := randomPgids(len(ids) / 400) + b.ResetTimer() + for i := 0; i < b.N; i++ { + txp := &txPending{ids: pending} + f := &freelist{ids: ids, pending: map[txid]*txPending{1: txp}} + f.release(1) + } +} + +func randomPgids(n int) []pgid { + rand.Seed(42) + pgids := make(pgids, n) + for i := range pgids { + pgids[i] = pgid(rand.Int63()) + } + sort.Sort(pgids) + return pgids +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/coreos/bbolt/node.go similarity index 99% rename from vendor/github.com/boltdb/bolt/node.go rename to vendor/github.com/coreos/bbolt/node.go index 159318b229cb..f4ce240edddb 100644 --- a/vendor/github.com/boltdb/bolt/node.go +++ b/vendor/github.com/coreos/bbolt/node.go @@ -365,7 +365,7 @@ func (n *node) spill() error { } // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) if err != nil { return err } diff --git a/vendor/github.com/boltdb/bolt/node_test.go b/vendor/github.com/coreos/bbolt/node_test.go similarity index 100% rename from vendor/github.com/boltdb/bolt/node_test.go rename to vendor/github.com/coreos/bbolt/node_test.go diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/coreos/bbolt/page.go similarity index 88% rename from vendor/github.com/boltdb/bolt/page.go rename to vendor/github.com/coreos/bbolt/page.go index 7651a6bf7d99..cde403ae86de 100644 --- a/vendor/github.com/boltdb/bolt/page.go +++ b/vendor/github.com/coreos/bbolt/page.go @@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } else if len(b) == 0 { + } + if len(b) == 0 { return a } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) + // Merged will hold all elements from both lists. + merged := dst[:0] // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids { } // Append what's left in follow. - merged = append(merged, follow...) - - return merged + _ = append(merged, follow...) 
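The page.go change above replaces the allocating merge with mergepgids, which writes the sorted union of two already-sorted id lists into a caller-provided slice and panics if the destination is too small. Below is a rough standalone sketch of that shape; it is simplified (an index-based merge rather than the lead/follow slice walk in the diff) and reuses the pgid name only for illustration.

package main

import "fmt"

type pgid uint64
type pgids []pgid

// mergepgids copies the sorted union of a and b into dst without allocating.
func mergepgids(dst, a, b pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Sprintf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	i, j, k := 0, 0, 0
	for i < len(a) && j < len(b) {
		if a[i] <= b[j] {
			dst[k] = a[i]
			i++
		} else {
			dst[k] = b[j]
			j++
		}
		k++
	}
	// At most one of the tails is non-empty; copy whatever remains.
	k += copy(dst[k:], a[i:])
	copy(dst[k:], b[j:])
}

func main() {
	a := pgids{3, 9, 12}
	b := pgids{4, 5, 18}
	dst := make(pgids, len(a)+len(b))
	mergepgids(dst, a, b)
	fmt.Println(dst) // [3 4 5 9 12 18]
}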
} diff --git a/vendor/github.com/boltdb/bolt/page_test.go b/vendor/github.com/coreos/bbolt/page_test.go similarity index 100% rename from vendor/github.com/boltdb/bolt/page_test.go rename to vendor/github.com/coreos/bbolt/page_test.go diff --git a/vendor/github.com/boltdb/bolt/quick_test.go b/vendor/github.com/coreos/bbolt/quick_test.go similarity index 89% rename from vendor/github.com/boltdb/bolt/quick_test.go rename to vendor/github.com/coreos/bbolt/quick_test.go index 4da581775a35..9e27792e1a0e 100644 --- a/vendor/github.com/boltdb/bolt/quick_test.go +++ b/vendor/github.com/coreos/bbolt/quick_test.go @@ -50,9 +50,17 @@ func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { n := rand.Intn(qmaxitems-1) + 1 items := make(testdata, n) + used := make(map[string]bool) for i := 0; i < n; i++ { item := &items[i] - item.Key = randByteSlice(rand, 1, qmaxksize) + // Ensure that keys are unique by looping until we find one that we have not already used. + for { + item.Key = randByteSlice(rand, 1, qmaxksize) + if !used[string(item.Key)] { + used[string(item.Key)] = true + break + } + } item.Value = randByteSlice(rand, 0, qmaxvsize) } return reflect.ValueOf(items) diff --git a/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go b/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go new file mode 100644 index 000000000000..da2031eed4a2 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go @@ -0,0 +1,47 @@ +package bolt_test + +import ( + "testing" + + "github.com/coreos/bbolt" +) + +func TestSimulateNoFreeListSync_1op_1p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 1, 1) +} +func TestSimulateNoFreeListSync_10op_1p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10, 1) +} +func TestSimulateNoFreeListSync_100op_1p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 100, 1) +} +func TestSimulateNoFreeListSync_1000op_1p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 1000, 1) +} +func TestSimulateNoFreeListSync_10000op_1p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10000, 1) +} +func TestSimulateNoFreeListSync_10op_10p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10, 10) +} +func TestSimulateNoFreeListSync_100op_10p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 100, 10) +} +func TestSimulateNoFreeListSync_1000op_10p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 1000, 10) +} +func TestSimulateNoFreeListSync_10000op_10p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10000, 10) +} +func TestSimulateNoFreeListSync_100op_100p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 100, 100) +} +func TestSimulateNoFreeListSync_1000op_100p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 1000, 100) +} +func TestSimulateNoFreeListSync_10000op_100p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10000, 100) +} +func TestSimulateNoFreeListSync_10000op_1000p(t *testing.T) { + testSimulate(t, &bolt.Options{NoFreelistSync: true}, 8, 10000, 1000) +} diff --git a/vendor/github.com/boltdb/bolt/simulation_test.go b/vendor/github.com/coreos/bbolt/simulation_test.go similarity index 67% rename from vendor/github.com/boltdb/bolt/simulation_test.go rename to 
vendor/github.com/coreos/bbolt/simulation_test.go index 383101655704..a5889c02f86d 100644 --- a/vendor/github.com/boltdb/bolt/simulation_test.go +++ b/vendor/github.com/coreos/bbolt/simulation_test.go @@ -7,28 +7,28 @@ import ( "sync" "testing" - "github.com/boltdb/bolt" + "github.com/coreos/bbolt" ) -func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } -func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } -func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } -func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } +func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, nil, 1, 1, 1) } +func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, nil, 1, 10, 1) } +func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, nil, 1, 100, 1) } +func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, nil, 1, 1000, 1) } +func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, nil, 1, 10000, 1) } -func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } -func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } -func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } -func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } +func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, nil, 1, 10, 10) } +func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, nil, 1, 100, 10) } +func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, nil, 1, 1000, 10) } +func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, nil, 1, 10000, 10) } -func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } -func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } -func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } +func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, nil, 1, 100, 100) } +func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, nil, 1, 1000, 100) } +func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, nil, 1, 10000, 100) } -func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } +func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, nil, 1, 10000, 1000) } // Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. -func testSimulate(t *testing.T, threadCount, parallelism int) { +func testSimulate(t *testing.T, openOption *bolt.Options, round, threadCount, parallelism int) { if testing.Short() { t.Skip("skipping test in short mode.") } @@ -42,81 +42,88 @@ func testSimulate(t *testing.T, threadCount, parallelism int) { var versions = make(map[int]*QuickDB) versions[1] = NewQuickDB() - db := MustOpenDB() + db := MustOpenWithOption(openOption) defer db.MustClose() var mutex sync.Mutex // Run n threads in parallel, each with their own operation. var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - // Start transaction. 
- tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } + for n := 0; n < round; n++ { - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() + var threads = make(chan bool, parallelism) + var i int + for { + threads <- true + wg.Add(1) + writable := ((rand.Int() % 100) < 20) // 20% writers - // Make sure we commit/rollback the tx at the end and update the state. + // Choose an operation to execute. + var handler simulateHandler if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() + handler = writerHandlers[rand.Intn(len(writerHandlers))] } else { - defer func() { _ = tx.Rollback() }() + handler = readerHandlers[rand.Intn(len(readerHandlers))] } - // Ignore operation if we don't have data yet. - if qdb == nil { - return + // Execute a thread for the given operation. + go func(writable bool, handler simulateHandler) { + defer wg.Done() + + // Start transaction. + tx, err := db.Begin(writable) + if err != nil { + t.Fatal("tx begin: ", err) + } + + // Obtain current state of the dataset. + mutex.Lock() + var qdb = versions[tx.ID()] + if writable { + qdb = versions[tx.ID()-1].Copy() + } + mutex.Unlock() + + // Make sure we commit/rollback the tx at the end and update the state. + if writable { + defer func() { + mutex.Lock() + versions[tx.ID()] = qdb + mutex.Unlock() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + } else { + defer func() { _ = tx.Rollback() }() + } + + // Ignore operation if we don't have data yet. + if qdb == nil { + return + } + + // Execute handler. + handler(tx, qdb) + + // Release a thread back to the scheduling loop. + <-threads + }(writable, handler) + + i++ + if i > threadCount { + break } + } - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) + // Wait until all threads are done. + wg.Wait() - i++ - if i > threadCount { - break - } + db.MustClose() + db.MustReopen() } - - // Wait until all threads are done. - wg.Wait() } type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go similarity index 94% rename from vendor/github.com/boltdb/bolt/tx.go rename to vendor/github.com/coreos/bbolt/tx.go index 1cfb4cde8555..aefcec09e33d 100644 --- a/vendor/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/coreos/bbolt/tx.go @@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil + return fn(k, tx.root.Bucket(k)) }) } @@ -169,28 +166,18 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err + // Free the old freelist because commit writes out a fresh freelist. 
+ if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { return err } + } else { + tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -235,6 +222,31 @@ func (tx *Tx) Commit() error { return nil } +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { _ = f.Close() }() + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, f.Close() + return n, nil } // CopyFile copies the entire database to file at the given path. @@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + // Check if any pages are double freed. freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } } // Recursively check buckets. @@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. 
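With the Commit changes above, setting NoFreelistSync makes a write transaction skip persisting the freelist page (the meta records pgidNoFreelist instead), and the list is rebuilt when the file is next opened. A hedged usage sketch follows; the path "app.db" is illustrative, while the Open signature and the NoFreelistSync option are the ones exercised by the new simulation tests.

package main

import (
	"log"

	"github.com/coreos/bbolt" // package name is bolt
)

func main() {
	// Trade slower opens (freelist reconstructed by scanning the file)
	// for faster commits (no freelist page written per transaction).
	db, err := bolt.Open("app.db", 0666, &bolt.Options{NoFreelistSync: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("k"), []byte("v"))
	}); err != nil {
		log.Fatal(err)
	}
}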
func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) + p, err := tx.db.allocate(tx.meta.txid, count) if err != nil { return nil, err } diff --git a/vendor/github.com/boltdb/bolt/tx_test.go b/vendor/github.com/coreos/bbolt/tx_test.go similarity index 81% rename from vendor/github.com/boltdb/bolt/tx_test.go rename to vendor/github.com/coreos/bbolt/tx_test.go index 2201e79288cc..de92cb535120 100644 --- a/vendor/github.com/boltdb/bolt/tx_test.go +++ b/vendor/github.com/coreos/bbolt/tx_test.go @@ -8,9 +8,57 @@ import ( "os" "testing" - "github.com/boltdb/bolt" + "github.com/coreos/bbolt" ) +// TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database. +func TestTx_Check_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.Close() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + readOnlyDB, err := bolt.Open(db.f, 0666, &bolt.Options{ReadOnly: true}) + if err != nil { + t.Fatal(err) + } + defer readOnlyDB.Close() + + tx, err := readOnlyDB.Begin(false) + if err != nil { + t.Fatal(err) + } + // ReadOnly DB will load freelist on Check call. + numChecks := 2 + errc := make(chan error, numChecks) + check := func() { + err, _ := <-tx.Check() + errc <- err + } + // Ensure the freelist is not reloaded and does not race. + for i := 0; i < numChecks; i++ { + go check() + } + for i := 0; i < numChecks; i++ { + if err := <-errc; err != nil { + t.Fatal(err) + } + } +} + // Ensure that committing a closed transaction returns an error. func TestTx_Commit_ErrTxClosed(t *testing.T) { db := MustOpenDB() @@ -602,6 +650,111 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) { } } +// TestTx_releaseRange ensures db.freePages handles page releases +// correctly when there are transaction that are no longer reachable +// via any read/write transactions and are "between" ongoing read +// transactions, which requires they must be freed by +// freelist.releaseRange. +func TestTx_releaseRange(t *testing.T) { + // Set initial mmap size well beyond the limit we will hit in this + // test, since we are testing with long running read transactions + // and will deadlock if db.grow is triggered. 
+ db := MustOpenWithOption(&bolt.Options{InitialMmapSize: os.Getpagesize() * 100}) + defer db.MustClose() + + bucket := "bucket" + + put := func(key, value string) { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(bucket)) + if err != nil { + t.Fatal(err) + } + return b.Put([]byte(key), []byte(value)) + }); err != nil { + t.Fatal(err) + } + } + + del := func(key string) { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(bucket)) + if err != nil { + t.Fatal(err) + } + return b.Delete([]byte(key)) + }); err != nil { + t.Fatal(err) + } + } + + getWithTxn := func(txn *bolt.Tx, key string) []byte { + return txn.Bucket([]byte(bucket)).Get([]byte(key)) + } + + openReadTxn := func() *bolt.Tx { + readTx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + return readTx + } + + checkWithReadTxn := func(txn *bolt.Tx, key string, wantValue []byte) { + value := getWithTxn(txn, key) + if !bytes.Equal(value, wantValue) { + t.Errorf("Wanted value to be %s for key %s, but got %s", wantValue, key, string(value)) + } + } + + rollback := func(txn *bolt.Tx) { + if err := txn.Rollback(); err != nil { + t.Fatal(err) + } + } + + put("k1", "v1") + rtx1 := openReadTxn() + put("k2", "v2") + hold1 := openReadTxn() + put("k3", "v3") + hold2 := openReadTxn() + del("k3") + rtx2 := openReadTxn() + del("k1") + hold3 := openReadTxn() + del("k2") + hold4 := openReadTxn() + put("k4", "v4") + hold5 := openReadTxn() + + // Close the read transactions we established to hold a portion of the pages in pending state. + rollback(hold1) + rollback(hold2) + rollback(hold3) + rollback(hold4) + rollback(hold5) + + // Execute a write transaction to trigger a releaseRange operation in the db + // that will free multiple ranges between the remaining open read transactions, now that the + // holds have been rolled back. + put("k4", "v4") + + // Check that all long running reads still read correct values. + checkWithReadTxn(rtx1, "k1", []byte("v1")) + checkWithReadTxn(rtx2, "k2", []byte("v2")) + rollback(rtx1) + rollback(rtx2) + + // Check that the final state is correct. + rtx7 := openReadTxn() + checkWithReadTxn(rtx7, "k1", nil) + checkWithReadTxn(rtx7, "k2", nil) + checkWithReadTxn(rtx7, "k3", nil) + checkWithReadTxn(rtx7, "k4", []byte("v4")) + rollback(rtx7) +} + func ExampleTx_Rollback() { // Open the database. 
db, err := bolt.Open(tempfile(), 0666, nil) diff --git a/vendor/github.com/coreos/etcd/.gitignore b/vendor/github.com/coreos/etcd/.gitignore index 210be6fe97d5..1a68387a7db7 100644 --- a/vendor/github.com/coreos/etcd/.gitignore +++ b/vendor/github.com/coreos/etcd/.gitignore @@ -1,15 +1,22 @@ +/agent-* /coverage +/covdir /gopath /gopath.proto /go-bindata +/release /machine* /bin +.Dockerfile-test .vagrant *.etcd +*.log /etcd *.swp /hack/insta-discovery/.env *.test tools/functional-tester/docker/bin +hack/scripts-dev/docker-dns/.Dockerfile +hack/scripts-dev/docker-dns-srv/.Dockerfile hack/tls-setup/certs .idea diff --git a/vendor/github.com/coreos/etcd/.semaphore.sh b/vendor/github.com/coreos/etcd/.semaphore.sh new file mode 100755 index 000000000000..1a6c85a62f51 --- /dev/null +++ b/vendor/github.com/coreos/etcd/.semaphore.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +TEST_SUFFIX=$(date +%s | base64 | head -c 15) + +TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.2.11" +if [ "$TEST_ARCH" == "386" ]; then + TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" +fi + +docker run \ + --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd \ + gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log" + +! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-${TEST_SUFFIX}.log diff --git a/vendor/github.com/coreos/etcd/.travis.yml b/vendor/github.com/coreos/etcd/.travis.yml index fb8e9b83284f..00f2b810223c 100644 --- a/vendor/github.com/coreos/etcd/.travis.yml +++ b/vendor/github.com/coreos/etcd/.travis.yml @@ -1,11 +1,13 @@ -dist: trusty language: go go_import_path: github.com/coreos/etcd -sudo: false + +sudo: required + +services: docker go: - - 1.8.3 - - tip +- 1.8.5 +- tip notifications: on_success: never @@ -13,19 +15,25 @@ notifications: env: matrix: - - TARGET=amd64 - - TARGET=darwin-amd64 - - TARGET=windows-amd64 - - TARGET=arm64 - - TARGET=arm - - TARGET=386 - - TARGET=ppc64le + - TARGET=amd64 + - TARGET=amd64-go-tip + - TARGET=darwin-amd64 + - TARGET=windows-amd64 + - TARGET=arm64 + - TARGET=arm + - TARGET=386 + - TARGET=ppc64le matrix: fast_finish: true allow_failures: - - go: tip + - go: tip + env: TARGET=amd64-go-tip exclude: + - go: 1.8.5 + env: TARGET=amd64-go-tip + - go: tip + env: TARGET=amd64 - go: tip env: TARGET=darwin-amd64 - go: tip @@ -39,45 +47,42 @@ matrix: - go: tip env: TARGET=ppc64le -addons: - apt: - sources: - - debian-sid - packages: - - libpcap-dev - - libaspell-dev - - libhunspell-dev - - shellcheck - before_install: - - go get -v -u github.com/chzchzchz/goword - - go get -v -u github.com/coreos/license-bill-of-materials - - go get -v -u honnef.co/go/tools/cmd/gosimple - - go get -v -u honnef.co/go/tools/cmd/unused - - go get -v -u honnef.co/go/tools/cmd/staticcheck - - ./scripts/install-marker.sh amd64 +- docker pull gcr.io/etcd-development/etcd-test:go1.8.5 -# disable godep restore override install: - - pushd cmd/etcd && go get -t -v ./... && popd +- pushd cmd/etcd && go get -t -v ./... 
&& popd script: - > case "${TARGET}" in amd64) + docker run --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "GOARCH=amd64 ./test" + ;; + amd64-go-tip) GOARCH=amd64 ./test ;; darwin-amd64) - GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build + docker run --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=darwin GOARCH=amd64 ./build" ;; windows-amd64) - GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build + docker run --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=windows GOARCH=amd64 ./build" ;; 386) - GOARCH=386 PASSES="build unit" ./test + docker run --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "GOARCH=386 PASSES='build unit' ./test" ;; *) # test building out of gopath - GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build + docker run --rm \ + --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \ + /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOARCH='${TARGET}' ./build" ;; esac diff --git a/vendor/github.com/coreos/etcd/Dockerfile-test b/vendor/github.com/coreos/etcd/Dockerfile-test new file mode 100644 index 000000000000..8e0be6a3f183 --- /dev/null +++ b/vendor/github.com/coreos/etcd/Dockerfile-test @@ -0,0 +1,57 @@ +FROM ubuntu:16.10 + +RUN rm /bin/sh && ln -s /bin/bash /bin/sh +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +RUN apt-get -y update \ + && apt-get -y install \ + build-essential \ + gcc \ + apt-utils \ + pkg-config \ + software-properties-common \ + apt-transport-https \ + libssl-dev \ + sudo \ + bash \ + curl \ + wget \ + tar \ + git \ + netcat \ + libaspell-dev \ + libhunspell-dev \ + hunspell-en-us \ + aspell-en \ + shellcheck \ + && apt-get -y update \ + && apt-get -y upgrade \ + && apt-get -y autoremove \ + && apt-get -y autoclean + +ENV GOROOT /usr/local/go +ENV GOPATH /go +ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} +ENV GO_VERSION REPLACE_ME_GO_VERSION +ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang +RUN rm -rf ${GOROOT} \ + && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ + && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ + && go version + +RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd +WORKDIR ${GOPATH}/src/github.com/coreos/etcd + +ADD ./scripts/install-marker.sh /tmp/install-marker.sh + +RUN go get -v -u -tags spell github.com/chzchzchz/goword \ + && go get -v -u github.com/coreos/license-bill-of-materials \ + && go get -v -u honnef.co/go/tools/cmd/gosimple \ + && go get -v -u honnef.co/go/tools/cmd/unused \ + && go get -v -u honnef.co/go/tools/cmd/staticcheck \ + && go get -v -u github.com/wadey/gocovmerge \ + && go get -v -u github.com/gordonklaus/ineffassign \ + && /tmp/install-marker.sh amd64 \ + && rm -f /tmp/install-marker.sh \ + && curl -s https://codecov.io/bash >/codecov \ + && chmod 700 /codecov diff --git a/vendor/github.com/coreos/etcd/Documentation/learning/auth_design.md b/vendor/github.com/coreos/etcd/Documentation/learning/auth_design.md index 192f4b21779c..52c979731bfb 100644 --- a/vendor/github.com/coreos/etcd/Documentation/learning/auth_design.md +++ b/vendor/github.com/coreos/etcd/Documentation/learning/auth_design.md @@ -60,7 +60,7 @@ For avoiding such a situation, the 
API layer performs *version number validation After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose. -Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`). +Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromIncomingContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`). ### Checking permission in the state machine diff --git a/vendor/github.com/coreos/etcd/Documentation/op-guide/monitoring.md b/vendor/github.com/coreos/etcd/Documentation/op-guide/monitoring.md index 5ea70ba7337f..2ae046c2d82b 100644 --- a/vendor/github.com/coreos/etcd/Documentation/op-guide/monitoring.md +++ b/vendor/github.com/coreos/etcd/Documentation/op-guide/monitoring.md @@ -41,11 +41,6 @@ When Elapsed (s) 17:34:51.999535 . 36 ... sent: header: kvs: count:1 ``` -## Metrics endpoint - -Each etcd server exports metrics under the `/metrics` path on its client port and optionally on interfaces given by `--listen-metrics-urls`. ->>>>>>> 607d0762e... 
Documentation/op-guide: remove grafana demo link - The metrics can be fetched with `curl`: ```sh diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go index 20b57f284897..3fac7f5a6fd7 100644 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -992,7 +992,7 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil } diff --git a/vendor/github.com/coreos/etcd/auth/store_test.go b/vendor/github.com/coreos/etcd/auth/store_test.go index bf0a4fc93196..f2a25aac6bf2 100644 --- a/vendor/github.com/coreos/etcd/auth/store_test.go +++ b/vendor/github.com/coreos/etcd/auth/store_test.go @@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"})) + // as if it came from RPC + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil && ai != nil { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) @@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) { t.Error(err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"})) _, err = as.AuthInfoFromCtx(ctx) if err != ErrInvalidAuthToken { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"})) _, err = as.AuthInfoFromCtx(ctx) if err != ErrInvalidAuthToken { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil { t.Error(err) @@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) { donec := make(chan struct{}) go func() { defer close(donec) - ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"})) + ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"})) as.AuthInfoFromCtx(ctx) }() as.UserAdd(&pb.AuthUserAddRequest{Name: "test"}) diff --git a/vendor/github.com/coreos/etcd/bill-of-materials.json b/vendor/github.com/coreos/etcd/bill-of-materials.json index 0a0f98a99519..1afbc70c2526 100644 --- a/vendor/github.com/coreos/etcd/bill-of-materials.json +++ b/vendor/github.com/coreos/etcd/bill-of-materials.json @@ -27,19 +27,19 @@ ] }, { - "project": "github.com/boltdb/bolt", + "project": "github.com/cockroachdb/cmux", "licenses": [ { - "type": "MIT License", + "type": "Apache License 2.0", "confidence": 1 } ] }, { - "project": "github.com/cockroachdb/cmux", + "project": "github.com/coreos/bbolt", "licenses": [ { - "type": "Apache License 2.0", + "type": "MIT License", "confidence": 1 } ] @@ -345,12 +345,21 @@ } ] }, + { + "project": 
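The auth store and its tests above move from metadata.FromContext/NewContext to the incoming-context variants. A small sketch of that server-side pattern is below, fabricating the incoming context the same way the updated tests do; the "token" key and its value are illustrative.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// tokenFromCtx reads the "token" entry from incoming request metadata.
func tokenFromCtx(ctx context.Context) (string, bool) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", false
	}
	ts := md["token"]
	if len(ts) == 0 {
		return "", false
	}
	return ts[0], true
}

func main() {
	// Simulate metadata arriving on an RPC, as the updated store_test does.
	ctx := metadata.NewIncomingContext(
		context.Background(),
		metadata.New(map[string]string{"token": "abc123"}),
	)
	if tok, ok := tokenFromCtx(ctx); ok {
		fmt.Println("token:", tok)
	}
}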
"google.golang.org/genproto/googleapis", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, { "project": "google.golang.org/grpc", "licenses": [ { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.979253112033195 + "type": "Apache License 2.0", + "confidence": 1 } ] }, diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md index 643d0e2f070c..376bfba7614d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -1,6 +1,6 @@ # etcd/clientv3 -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) `etcd/clientv3` is the official Go etcd client for v3. diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index 18f1f5b09195..a64b8caca895 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -100,60 +101,65 @@ type Auth interface { } type auth struct { - remote pb.AuthClient + remote pb.AuthClient + callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - return &auth{remote: pb.NewAuthClient(c.ActiveConnection())} + api := &auth{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) return (*AuthDisableResponse)(resp), toErr(ctx, err) } func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserAddResponse)(resp), toErr(ctx, err) } func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) 
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } @@ -163,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran RangeEnd: []byte(rangeEnd), PermType: authpb.Permission_Type(permType), } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) 
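The pattern running through these auth changes is that each API wrapper captures the owning client's call options once (api.callOpts = c.callOpts) and forwards them on every RPC, rather than hard-coding grpc.FailFast(false) at each call site. A toy sketch of that plumbing follows; CallOption, remote, and client here are stand-ins, not the real gRPC or etcd clientv3 types.

package main

import "fmt"

type CallOption string

type remote struct{}

// UserAdd stands in for a generated RPC stub that accepts variadic call options.
func (remote) UserAdd(name string, opts ...CallOption) string {
	return fmt.Sprintf("UserAdd(%s) with opts %v", name, opts)
}

type client struct{ callOpts []CallOption }

type auth struct {
	remote   remote
	callOpts []CallOption // copied from the owning client, like c.callOpts in the diff
}

func newAuth(c *client) *auth {
	a := &auth{}
	if c != nil {
		a.callOpts = c.callOpts
	}
	return a
}

func (a *auth) UserAdd(name string) string {
	// Every call forwards the client-level options, mirroring
	// auth.remote.UserAdd(ctx, req, auth.callOpts...) above.
	return a.remote.UserAdd(name, a.callOpts...)
}

func main() {
	c := &client{callOpts: []CallOption{"wait-for-ready", "max-recv-4MiB"}}
	fmt.Println(newAuth(c).UserAdd("alice"))
}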
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } @@ -196,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) { } type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption } func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthenticateResponse)(resp), toErr(ctx, err) } @@ -209,14 +216,18 @@ func (auth *authenticator) close() { auth.conn.Close() } -func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { +func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) { conn, err := grpc.Dial(endpoint, opts...) if err != nil { return nil, err } - return &authenticator{ + api := &authenticator{ conn: conn, remote: pb.NewAuthClient(conn), - }, nil + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 6ae047e98419..000000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoints for grpc - addrs []grpc.Address - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects upEps, pinAddr, and connectingAddr - mu sync.RWMutex - - // upc closes when upEps transitions from empty to non-zero or the balancer closes. 
- upc chan struct{} - - // downc closes when grpc calls down() on pinAddr - downc chan struct{} - - // stopc is closed to signal updateNotifyLoop should stop. - stopc chan struct{} - - // donec closes when all goroutines are exited - donec chan struct{} - - // updateAddrsC notifies updateNotifyLoop to update addrs. - updateAddrsC chan struct{} - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - host2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // intialization and shutdown. - pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address, 1) - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - sb := &simpleBalancer{ - addrs: addrs, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upc: make(chan struct{}), - stopc: make(chan struct{}), - downc: make(chan struct{}), - donec: make(chan struct{}), - updateAddrsC: make(chan struct{}, 1), - host2ep: getHost2ep(eps), - } - close(sb.downc) - go sb.updateNotifyLoop() - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) getEndpoint(host string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.host2ep[host] -} - -func getHost2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps []string) { - np := getHost2ep(eps) - - b.mu.Lock() - - match := len(np) == len(b.host2ep) - for k, v := range np { - if b.host2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - b.mu.Unlock() - return - } - - b.host2ep = np - - addrs := make([]grpc.Address, 0, len(eps)) - for i := range eps { - addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) - } - b.addrs = addrs - - // updating notifyCh can trigger new connections, - // only update addrs if all connections are down - // or addrs does not include pinAddr. 
- update := !hasAddr(addrs, b.pinAddr) - b.mu.Unlock() - - if update { - select { - case b.updateAddrsC <- struct{}{}: - case <-b.stopc: - } - } -} - -func hasAddr(addrs []grpc.Address, targetAddr string) bool { - for _, addr := range addrs { - if targetAddr == addr.Addr { - return true - } - } - return false -} - -func (b *simpleBalancer) updateNotifyLoop() { - defer close(b.donec) - - for { - b.mu.RLock() - upc, downc, addr := b.upc, b.downc, b.pinAddr - b.mu.RUnlock() - // downc or upc should be closed - select { - case <-downc: - downc = nil - default: - } - select { - case <-upc: - upc = nil - default: - } - switch { - case downc == nil && upc == nil: - // stale - select { - case <-b.stopc: - return - default: - } - case downc == nil: - b.notifyAddrs() - select { - case <-upc: - case <-b.updateAddrsC: - b.notifyAddrs() - case <-b.stopc: - return - } - case upc == nil: - select { - // close connections that are not the pinned address - case b.notifyCh <- []grpc.Address{{Addr: addr}}: - case <-downc: - case <-b.stopc: - return - } - select { - case <-downc: - case <-b.updateAddrsC: - case <-b.stopc: - return - } - b.notifyAddrs() - } - } -} - -func (b *simpleBalancer) notifyAddrs() { - b.mu.RLock() - addrs := b.addrs - b.mu.RUnlock() - select { - case b.notifyCh <- addrs: - case <-b.stopc: - } -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Or our simplerBalancer - // might panic since b.upc is closed. - if b.closed { - return func(err error) {} - } - // gRPC might call Up on a stale address. - // Prevent updating pinAddr with a stale address. - if !hasAddr(b.addrs, addr.Addr) { - return func(err error) {} - } - if b.pinAddr != "" { - return func(err error) {} - } - // notify waiting Get()s and pin first connected address - close(b.upc) - b.downc = make(chan struct{}) - b.pinAddr = addr.Addr - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - return func(err error) { - b.mu.Lock() - b.upc = make(chan struct{}) - close(b.downc) - b.pinAddr = "" - b.mu.Unlock() - } -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var ( - addr string - closed bool - ) - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. - if !opts.BlockingWait { - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr == "" { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-b.donec: - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - closed = b.closed - addr = b.pinAddr - b.mu.RUnlock() - // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. 
- if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if addr != "" { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - b.mu.Unlock() - <-b.donec - return nil - } - b.closed = true - close(b.stopc) - b.pinAddr = "" - - // In the case of following scenario: - // 1. upc is not closed; no pinned address - // 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks - // 3. clientconn.Close() calls balancer.Close(); closed = true - // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled - // we must close upc so Get() exits from blocking on upc - select { - case <-b.upc: - default: - // terminate all waiting Get()s - close(b.upc) - } - - b.mu.Unlock() - - // wait for updateNotifyLoop to finish - <-b.donec - close(b.notifyCh) - - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer_test.go b/vendor/github.com/coreos/etcd/clientv3/balancer_test.go deleted file mode 100644 index 5245b69a2aa8..000000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "errors" - "net" - "sync" - "testing" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/testutil" - - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -var ( - endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"} -) - -func TestBalancerGetUnblocking(t *testing.T) { - sb := newSimpleBalancer(endpoints) - defer sb.Close() - if addrs := <-sb.Notify(); len(addrs) != len(endpoints) { - t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't") - } - unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false} - - _, _, err := sb.Get(context.Background(), unblockingOpts) - if err != ErrNoAddrAvilable { - t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err) - } - - down1 := sb.Up(grpc.Address{Addr: endpoints[1]}) - if addrs := <-sb.Notify(); len(addrs) != 1 { - t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed") - } - down2 := sb.Up(grpc.Address{Addr: endpoints[2]}) - addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts) - if err != nil { - t.Errorf("Get() with up endpoints should success, got %v", err) - } - if addrFirst.Addr != endpoints[1] { - t.Errorf("Get() didn't return expected address, got %v", addrFirst) - } - if putFun == nil { - t.Errorf("Get() returned unexpected nil put function") - } - addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts) - if addrFirst.Addr != addrSecond.Addr { - t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond) - } - - down1(errors.New("error")) - if addrs := <-sb.Notify(); len(addrs) != len(endpoints) { - t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection") - } - down2(errors.New("error")) - _, _, err = sb.Get(context.Background(), unblockingOpts) - if err != ErrNoAddrAvilable { - t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err) - } -} - -func TestBalancerGetBlocking(t *testing.T) { - sb := newSimpleBalancer(endpoints) - defer sb.Close() - if addrs := <-sb.Notify(); len(addrs) != len(endpoints) { - t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't") - } - blockingOpts := grpc.BalancerGetOptions{BlockingWait: true} - - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100) - _, _, err := sb.Get(ctx, blockingOpts) - if err != context.DeadlineExceeded { - t.Errorf("Get() with no up endpoints should timeout, got %v", err) - } - - downC := make(chan func(error), 1) - - go func() { - // ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get() - time.Sleep(time.Millisecond * 100) - f := sb.Up(grpc.Address{Addr: endpoints[1]}) - if addrs := <-sb.Notify(); len(addrs) != 1 { - t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed") - } - downC <- f - }() - addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts) - if err != nil { - t.Errorf("Get() with up endpoints should success, got %v", err) - } - if addrFirst.Addr != endpoints[1] { - t.Errorf("Get() didn't return expected address, got %v", addrFirst) - } - if putFun == nil { - t.Errorf("Get() returned unexpected nil put function") - } - down1 := <-downC - - 
down2 := sb.Up(grpc.Address{Addr: endpoints[2]}) - addrSecond, _, _ := sb.Get(context.Background(), blockingOpts) - if addrFirst.Addr != addrSecond.Addr { - t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond) - } - - down1(errors.New("error")) - if addrs := <-sb.Notify(); len(addrs) != len(endpoints) { - t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection") - } - down2(errors.New("error")) - ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100) - _, _, err = sb.Get(ctx, blockingOpts) - if err != context.DeadlineExceeded { - t.Errorf("Get() with no up endpoints should timeout, got %v", err) - } -} - -// TestBalancerDoNotBlockOnClose ensures that balancer and grpc don't deadlock each other -// due to rapid open/close conn. The deadlock causes balancer.Close() to block forever. -// See issue: https://github.com/coreos/etcd/issues/7283 for more detail. -func TestBalancerDoNotBlockOnClose(t *testing.T) { - defer testutil.AfterTest(t) - - kcl := newKillConnListener(t, 3) - defer kcl.close() - - for i := 0; i < 5; i++ { - sb := newSimpleBalancer(kcl.endpoints()) - conn, err := grpc.Dial("", grpc.WithInsecure(), grpc.WithBalancer(sb)) - if err != nil { - t.Fatal(err) - } - kvc := pb.NewKVClient(conn) - <-sb.readyc - - var wg sync.WaitGroup - wg.Add(100) - cctx, cancel := context.WithCancel(context.TODO()) - for j := 0; j < 100; j++ { - go func() { - defer wg.Done() - kvc.Range(cctx, &pb.RangeRequest{}, grpc.FailFast(false)) - }() - } - // balancer.Close() might block - // if balancer and grpc deadlock each other. - bclosec, cclosec := make(chan struct{}), make(chan struct{}) - go func() { - defer close(bclosec) - sb.Close() - }() - go func() { - defer close(cclosec) - conn.Close() - }() - select { - case <-bclosec: - case <-time.After(3 * time.Second): - testutil.FatalStack(t, "balancer close timeout") - } - select { - case <-cclosec: - case <-time.After(3 * time.Second): - t.Fatal("grpc conn close timeout") - } - - cancel() - wg.Wait() - } -} - -// killConnListener listens incoming conn and kills it immediately. 
-type killConnListener struct { - wg sync.WaitGroup - eps []string - stopc chan struct{} - t *testing.T -} - -func newKillConnListener(t *testing.T, size int) *killConnListener { - kcl := &killConnListener{stopc: make(chan struct{}), t: t} - - for i := 0; i < size; i++ { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatal(err) - } - kcl.eps = append(kcl.eps, ln.Addr().String()) - kcl.wg.Add(1) - go kcl.listen(ln) - } - return kcl -} - -func (kcl *killConnListener) endpoints() []string { - return kcl.eps -} - -func (kcl *killConnListener) listen(l net.Listener) { - go func() { - defer kcl.wg.Done() - for { - conn, err := l.Accept() - select { - case <-kcl.stopc: - return - default: - } - if err != nil { - kcl.t.Fatal(err) - } - time.Sleep(1 * time.Millisecond) - conn.Close() - } - }() - <-kcl.stopc - l.Close() -} - -func (kcl *killConnListener) close() { - close(kcl.stopc) - kcl.wg.Wait() -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index f220a895ef2a..2bdd928771f1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -31,7 +31,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) var ( @@ -51,21 +53,22 @@ type Client struct { conn *grpc.ClientConn dialerrc chan error - cfg Config - creds *credentials.TransportCredentials - balancer *simpleBalancer - retryWrapper retryRpcFunc - retryAuthWrapper retryRpcFunc + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu *sync.Mutex ctx context.Context cancel context.CancelFunc - // Username is a username for authentication + // Username is a user name for authentication. Username string - // Password is a password for authentication + // Password is a password for authentication. Password string // tokenCred is an instance of WithPerRPCCredentials()'s argument tokenCred *authTokenCredential + + callOpts []grpc.CallOption } // New creates a new etcdv3 client from a given configuration. @@ -116,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) { // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() c.cfg.Endpoints = eps - c.balancer.updateAddrs(eps) + c.mu.Unlock() + c.balancer.updateAddrs(eps...) + + // updating notifyCh can trigger new connections, + // need update addrs if all connections are down + // or addrs does not include pinAddr. + c.balancer.mu.RLock() + update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) + c.balancer.mu.RUnlock() + if update { + select { + case c.balancer.updateAddrsC <- notifyNext: + case <-c.balancer.stopc: + } + } } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. 
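For context on the hunk above: the client now carries a callOpts slice that every stub call receives, and newClient fills it from the new Config knobs introduced in this bump. A minimal consumer-side sketch, assuming an endpoint at localhost:2379 and illustrative (not default) limits:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
		// Keepalive pings let the new health balancer notice dead endpoints.
		DialKeepAliveTime:    10 * time.Second,
		DialKeepAliveTimeout: 3 * time.Second,
		// Per-call gRPC limits; newClient turns these into the callOpts
		// passed on every RPC in place of the old grpc.FailFast(false).
		MaxCallSendMsgSize: 2 * 1024 * 1024,
		MaxCallRecvMsgSize: 8 * 1024 * 1024,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}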
@@ -144,8 +162,10 @@ func (c *Client) autoSync() { case <-c.ctx.Done(): return case <-time.After(c.cfg.AutoSyncInterval): - ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) - if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { logger.Println("Auto sync endpoints failed:", err) } } @@ -174,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { - return + return proto, host, scheme } scheme = url.Scheme @@ -188,7 +208,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { default: proto, host = "", "" } - return + return proto, host, scheme } func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { @@ -207,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden default: creds = nil } - return + return creds } // dialSetupOpts gives the dial opts prior to any authentication @@ -215,10 +235,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) if host == "" && endpoint != "" { // dialing an endpoint not in the balancer; use // endpoint passed into dial @@ -270,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error { endpoint := c.cfg.Endpoints[i] host := getHost(endpoint) // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) + auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c) if err != nil { continue } @@ -311,7 +338,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo if err != nil { if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout + err = context.DeadlineExceeded } return nil, err } @@ -333,7 +360,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. 
func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) + return metadata.NewOutgoingContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -360,15 +387,37 @@ func newClient(cfg *Config) (*Client, error) { creds: creds, ctx: ctx, cancel: cancel, + mu: new(sync.Mutex), + callOpts: defaultCallOpts, } if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } + + client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { + return grpcHealthCheck(client, ep) + }) - client.balancer = newSimpleBalancer(cfg.Endpoints) // use Endpoints[0] so that for https:// without any tls config given, then - // grpc will assume the ServerName is in the endpoint. + // grpc will assume the certificate server name is the endpoint host. conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() @@ -376,21 +425,19 @@ func newClient(cfg *Config) (*Client, error) { return nil, err } client.conn = conn - client.retryWrapper = client.newRetryWrapper() - client.retryAuthWrapper = client.newAuthRetryWrapper() // wait for a connection if cfg.DialTimeout > 0 { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.readyc: + case <-client.balancer.ready(): hasConn = true case <-ctx.Done(): case <-waitc: } if !hasConn { - err := grpc.ErrClientConnTimeout + err := context.DeadlineExceeded select { case err = <-client.dialerrc: default: @@ -425,7 +472,7 @@ func (c *Client) checkVersion() (err error) { errc := make(chan error, len(c.cfg.Endpoints)) ctx, cancel := context.WithCancel(c.ctx) if c.cfg.DialTimeout > 0 { - ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout) + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) } wg.Add(len(c.cfg.Endpoints)) for _, ep := range c.cfg.Endpoints { @@ -440,7 +487,7 @@ func (c *Client) checkVersion() (err error) { vs := strings.Split(resp.Version, ".") maj, min := 0, 0 if len(vs) >= 2 { - maj, rerr = strconv.Atoi(vs[0]) + maj, _ = strconv.Atoi(vs[0]) min, rerr = strconv.Atoi(vs[1]) } if maj < 3 || (maj == 3 && min < 2) { @@ -472,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - code := grpc.Code(err) + ev, _ := status.FromError(err) // Unavailable codes mean the system will be right back. // (e.g., can't connect, lost leader) // Treat Internal codes as if something failed, leaving the // system in an inconsistent state, but retrying could make progress. // (e.g., failed in middle of send, corrupted frame) // TODO: are permanent Internal errors possible from grpc? 
- return code != codes.Unavailable && code != codes.Internal + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -490,7 +537,8 @@ func toErr(ctx context.Context, err error) error { if _, ok := err.(rpctypes.EtcdError); ok { return err } - code := grpc.Code(err) + ev, _ := status.FromError(err) + code := ev.Code() switch code { case codes.DeadlineExceeded: fallthrough @@ -499,7 +547,6 @@ func toErr(ctx context.Context, err error) error { err = ctx.Err() } case codes.Unavailable: - err = ErrNoAvailableEndpoints case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } diff --git a/vendor/github.com/coreos/etcd/clientv3/client_test.go b/vendor/github.com/coreos/etcd/clientv3/client_test.go index dd3e144b7fed..0b5648573abc 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/client_test.go @@ -22,8 +22,8 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" - "google.golang.org/grpc" ) func TestDialCancel(t *testing.T) { @@ -45,7 +45,7 @@ func TestDialCancel(t *testing.T) { t.Fatal(err) } - // connect to ipv4 blackhole so dial blocks + // connect to ipv4 black hole so dial blocks c.SetEndpoints("http://254.0.0.1:12345") // issue Get to force redial attempts @@ -97,7 +97,7 @@ func TestDialTimeout(t *testing.T) { for i, cfg := range testCfgs { donec := make(chan error) go func() { - // without timeout, dial continues forever on ipv4 blackhole + // without timeout, dial continues forever on ipv4 black hole c, err := New(cfg) if c != nil || err == nil { t.Errorf("#%d: new client should fail", i) @@ -117,8 +117,8 @@ func TestDialTimeout(t *testing.T) { case <-time.After(5 * time.Second): t.Errorf("#%d: failed to timeout dial on time", i) case err := <-donec: - if err != grpc.ErrClientConnTimeout { - t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout) + if err != context.DeadlineExceeded { + t.Errorf("#%d: unexpected error %v, want %v", i, err, context.DeadlineExceeded) } } } diff --git a/vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go b/vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go index ed6d0f6955bc..e720723e3d48 100644 --- a/vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go @@ -15,11 +15,12 @@ package clientv3util_test import ( - "context" "log" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/clientv3util" + + "golang.org/x/net/context" ) func ExampleKeyExists_put() { @@ -33,7 +34,7 @@ func ExampleKeyExists_put() { kvc := clientv3.NewKV(cli) // perform a put only if key is missing - // It is useful to do the check (transactionally) to avoid overwriting + // It is useful to do the check atomically to avoid overwriting // the existing key which would generate potentially unwanted events, // unless of course you wanted to do an overwrite no matter what. _, err = kvc.Txn(context.Background()). 
diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index 922d900e3c3d..93637ddc570c 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -16,6 +16,8 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/types" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -43,20 +45,34 @@ type Cluster interface { } type cluster struct { - remote pb.ClusterClient + remote pb.ClusterClient + callOpts []grpc.CallOption } func NewCluster(c *Client) Cluster { - return &cluster{remote: RetryClusterClient(c)} + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api } -func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { - return &cluster{remote: remote} +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r) + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -65,7 +81,7 @@ func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAdd func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r) + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -73,28 +89,25 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes } func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil } + return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. - for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) 
+ if err == nil { + return (*MemberListResponse)(resp), nil } + return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go index 32d97eb0cc1c..41e80c1da5d4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} } -// WithCompactPhysical makes compact RPC call wait until -// the compaction is physically applied to the local database -// such that compacted entries are totally removed from the -// backend database. +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index c55228cc0b88..68a25fd800fb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -99,6 +99,7 @@ func (cmp *Cmp) ValueBytes() []byte { // WithValueBytes sets the byte slice for the comparison's value. func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v @@ -108,3 +109,12 @@ func mustInt64(val interface{}) int64 { } panic("bad value") } + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go index 1d75dde3d829..c092bde0aeb6 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -21,6 +21,7 @@ import ( v3 "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" ) @@ -185,12 +186,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { cancel() return } - // only accept PUTs; a DELETE will make observe() spin + // only accept puts; a delete will make observe() spin for _, ev := range wr.Events { if ev.Type == mvccpb.PUT { hdr, kv = &wr.Header, ev.Kv // may have multiple revs; hdr.rev = the last rev - // set to kv's rev in case batch has multiple PUTs + // set to kv's rev in case batch has multiple Puts hdr.Revision = kv.ModRevision break } @@ -213,6 +214,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { for !keyDeleted { wr, ok := <-wch if !ok { + cancel() return } for _, ev := range wr.Events { @@ -225,6 +227,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { select { case ch <- *resp: case <-cctx.Done(): + cancel() return } } @@ -240,4 +243,4 @@ func (e *Election) Key() string { return e.leaderKey } func (e *Election) Rev() int64 { return e.leaderRev } // Header is the response header from the last successful election proposal. 
-func (m *Election) Header() *pb.ResponseHeader { return m.hdr } +func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go index cf006d70e671..9936737756ca 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go @@ -20,6 +20,7 @@ import ( v3 "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go index cee15db7b338..736a9d3d353f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -20,6 +20,7 @@ import ( v3 "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" ) @@ -49,7 +50,9 @@ func (m *Mutex) Lock(ctx context.Context) error { put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) // reuse key in case this session already holds the lock get := v3.OpGet(m.myKey) - resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit() + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() if err != nil { return err } @@ -57,6 +60,12 @@ func (m *Mutex) Lock(ctx context.Context) error { if !resp.Succeeded { m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } // wait for deletion revisions prior to myKey hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go index 0cb5ea7cf1eb..55cb553ea4ad 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go @@ -18,6 +18,7 @@ import ( "time" v3 "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) @@ -53,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { ctx, cancel := context.WithCancel(ops.ctx) keepAlive, err := client.KeepAlive(ctx, id) if err != nil || keepAlive == nil { + cancel() return nil, err } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go index 8d0a64bacb99..6bfd70ec4286 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -18,6 +18,7 @@ import ( "math" v3 "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) @@ -46,7 +47,7 @@ const ( // SerializableSnapshot provides serializable isolation and also checks // for write conflicts. SerializableSnapshot Isolation = iota - // Serializable reads within the same transactiona attempt return data + // Serializable reads within the same transaction attempt return data // from the at the revision of the first read. 
Serializable // RepeatableReads reads within the same transaction attempt always @@ -85,7 +86,7 @@ func WithPrefetch(keys ...string) stmOption { return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } } -// NewSTM initiates a new STM instance, using snapshot isolation by default. +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { opts := &stmOptions{ctx: c.Ctx()} for _, f := range so { diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index dda72a748e61..fee12eaf60bb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -33,10 +33,31 @@ type Config struct { // DialTimeout is the timeout for failing to establish a connection. DialTimeout time.Duration `json:"dial-timeout"` + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + // TLS holds the client secure credentials, if any. TLS *tls.Config - // Username is a username for authentication. + // Username is a user name for authentication. Username string `json:"username"` // Password is a password for authentication. diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index 470ca4dc4769..dacc5bb346fc 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -28,7 +28,7 @@ // Make sure to close the client after using it. If the client is not closed, the // connection will have leaky goroutines. // -// To specify client request timeout, pass context.WithTimeout to APIs: +// To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) // resp, err := kvc.Put(ctx, "sample_key", "sample_value") diff --git a/vendor/github.com/coreos/etcd/clientv3/example_auth_test.go b/vendor/github.com/coreos/etcd/clientv3/example_auth_test.go deleted file mode 100644 index f07e5095c0c7..000000000000 --- a/vendor/github.com/coreos/etcd/clientv3/example_auth_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "fmt" - "log" - - "github.com/coreos/etcd/clientv3" - "golang.org/x/net/context" -) - -func ExampleAuth() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil { - log.Fatal(err) - } - - if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil { - log.Fatal(err) - } - - if _, err = cli.RoleGrantPermission( - context.TODO(), - "r", // role name - "foo", // key - "zoo", // range end - clientv3.PermissionType(clientv3.PermReadWrite), - ); err != nil { - log.Fatal(err) - } - if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil { - log.Fatal(err) - } - if _, err = cli.AuthEnable(context.TODO()); err != nil { - log.Fatal(err) - } - - cliAuth, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - DialTimeout: dialTimeout, - Username: "u", - Password: "123", - }) - if err != nil { - log.Fatal(err) - } - defer cliAuth.Close() - - if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil { - log.Fatal(err) - } - - _, err = cliAuth.Txn(context.TODO()). - If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")). - Then(clientv3.OpPut("zoo1", "XYZ")). - Else(clientv3.OpPut("zoo1", "ABC")). 
- Commit() - fmt.Println(err) - - // now check the permission with the root account - rootCli, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - DialTimeout: dialTimeout, - Username: "root", - Password: "123", - }) - if err != nil { - log.Fatal(err) - } - defer rootCli.Close() - - resp, err := rootCli.RoleGet(context.TODO(), "r") - if err != nil { - log.Fatal(err) - } - fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd) - - if _, err = rootCli.AuthDisable(context.TODO()); err != nil { - log.Fatal(err) - } - // Output: etcdserver: permission denied - // user u permission: key "foo", range end "zoo" -} diff --git a/vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go b/vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go index e9e516ba997f..ed57be6547f8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go @@ -19,6 +19,7 @@ import ( "log" "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/example_kv_test.go b/vendor/github.com/coreos/etcd/clientv3/example_kv_test.go index 340b07811d34..ed9f4e1f8ed9 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_kv_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_kv_test.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "golang.org/x/net/context" ) @@ -236,8 +237,11 @@ func ExampleKV_txn() { ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) _, err = kvc.Txn(ctx). - If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical - Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc' + // txn value comparisons are lexical + If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). + // the "Then" runs, since "xyz" > "abc" + Then(clientv3.OpPut("key", "XYZ")). + // the "Else" does not run Else(clientv3.OpPut("key", "ABC")). 
Commit() cancel() diff --git a/vendor/github.com/coreos/etcd/clientv3/example_lease_test.go b/vendor/github.com/coreos/etcd/clientv3/example_lease_test.go index e1bd57a15d72..bad6d392dba1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_lease_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_lease_test.go @@ -19,6 +19,7 @@ import ( "log" "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go b/vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go index 9753176aa5bf..67323a210ff7 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go @@ -18,9 +18,8 @@ import ( "fmt" "log" - "golang.org/x/net/context" - "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) func ExampleMaintenance_status() { @@ -34,20 +33,15 @@ func ExampleMaintenance_status() { } defer cli.Close() - // resp, err := cli.Status(context.Background(), ep) - // - // or - // - mapi := clientv3.NewMaintenance(cli) - resp, err := mapi.Status(context.Background(), ep) + resp, err := cli.Status(context.Background(), ep) if err != nil { log.Fatal(err) } - fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader) + fmt.Printf("endpoint: %s / Leader: %v\n", ep, resp.Header.MemberId == resp.Leader) } - // endpoint: localhost:2379 / IsLeader: false - // endpoint: localhost:22379 / IsLeader: false - // endpoint: localhost:32379 / IsLeader: true + // endpoint: localhost:2379 / Leader: false + // endpoint: localhost:22379 / Leader: false + // endpoint: localhost:32379 / Leader: true } func ExampleMaintenance_defragment() { diff --git a/vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go b/vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go index 6e5fd5e0958c..67b959038ce9 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go @@ -43,10 +43,10 @@ func ExampleClient_metrics() { } defer cli.Close() - // get a key so it shows up in the metrics as a range rpc + // get a key so it shows up in the metrics as a range RPC cli.Get(context.TODO(), "test_key") - // listen for all prometheus metrics + // listen for all Prometheus metrics ln, err := net.Listen("tcp", ":0") if err != nil { log.Fatal(err) @@ -61,7 +61,7 @@ func ExampleClient_metrics() { <-donec }() - // make an http request to fetch all prometheus metrics + // make an http request to fetch all Prometheus metrics url := "http://" + ln.Addr().String() + "/metrics" resp, err := http.Get(url) if err != nil { @@ -80,5 +80,6 @@ func ExampleClient_metrics() { break } } - // Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1 + // Output: + // grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1 } diff --git a/vendor/github.com/coreos/etcd/clientv3/example_test.go b/vendor/github.com/coreos/etcd/clientv3/example_test.go index 4e9b5042d99a..a6b7b27e8e91 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_test.go @@ -16,12 +16,14 @@ package clientv3_test import ( "log" + "os" "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" ) var ( @@ -31,8 
+33,7 @@ var ( ) func Example() { - var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3") - clientv3.SetLogger(plog) + clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, diff --git a/vendor/github.com/coreos/etcd/clientv3/example_watch_test.go b/vendor/github.com/coreos/etcd/clientv3/example_watch_test.go index b3a0f1dd2dfe..1d723e6163bd 100644 --- a/vendor/github.com/coreos/etcd/clientv3/example_watch_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/example_watch_test.go @@ -19,6 +19,7 @@ import ( "log" "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go new file mode 100644 index 000000000000..592dd6993cf6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math" + + "google.golang.org/grpc" +) + +var ( + // Disable gRPC internal retrial logic + // TODO: enable when gRPC retry is stable (FailFast=false) + // Reference: + // - https://github.com/grpc/grpc-go/issues/1532 + // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md + defaultFailFast = grpc.FailFast(true) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 000000000000..52bea90e66ed --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,627 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "errors" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + minHealthRetryDuration = 3 * time.Second + unknownService = "unknown service grpc.health.v1.Health" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. +var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") + +type healthCheckFunc func(ep string) (bool, error) + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// healthBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type healthBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + unhealthyMu sync.RWMutex + unhealthyHostPorts map[string]time.Time + + // mu protects all fields below. + mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. + updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. 
+ pinAddr string + + closed bool +} + +func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + hb := &healthBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + healthCheck: hc, + unhealthyHostPorts: make(map[string]time.Time), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + close(hb.downc) + go hb.updateNotifyLoop() + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy() + }() + return hb +} + +func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *healthBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *healthBalancer) endpoint(hostPort string) string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.hostPort2ep[hostPort] +} + +func (b *healthBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func (b *healthBalancer) hostPortError(hostPort string, err error) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) + } + return + } + + b.unhealthyMu.Lock() + b.unhealthyHostPorts[hostPort] = time.Now() + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) + } +} + +func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) + } + return + } + + b.unhealthyMu.Lock() + delete(b.unhealthyHostPorts, hostPort) + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + } +} + +func (b *healthBalancer) countUnhealthy() (count int) { + b.unhealthyMu.RLock() + count = len(b.unhealthyHostPorts) + b.unhealthyMu.RUnlock() + return count +} + +func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { + b.unhealthyMu.RLock() + _, unhealthy = b.unhealthyHostPorts[hostPort] + b.unhealthyMu.RUnlock() + return unhealthy +} + +func (b *healthBalancer) cleanupUnhealthy() { + b.unhealthyMu.Lock() + for k, v := range b.unhealthyHostPorts { + if time.Since(v) > b.healthCheckTimeout { + delete(b.unhealthyHostPorts, k) + if logger.V(4) { + logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + } + } + } + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { + unhealthyCnt := b.countUnhealthy() + + b.mu.RLock() + defer b.mu.RUnlock() + + hbAddrs := b.addrs + if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { + liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) + for k := range b.hostPort2ep { + liveHostPorts[k] = struct{}{} + } + return hbAddrs, liveHostPorts + } + + addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) + liveHostPorts := make(map[string]struct{}, len(addrs)) + for _, addr := range b.addrs { + if 
!b.isUnhealthy(addr.Addr) { + addrs = append(addrs, addr) + liveHostPorts[addr.Addr] = struct{}{} + } + } + return addrs, liveHostPorts +} + +func (b *healthBalancer) updateUnhealthy() { + for { + select { + case <-time.After(b.healthCheckTimeout): + b.cleanupUnhealthy() + pinned := b.pinned() + if pinned == "" || b.isUnhealthy(pinned) { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + return + } + } + case <-b.stopc: + return + } + } +} + +func (b *healthBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.hostPort2ep) + if match { + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + b.unhealthyMu.Lock() + b.unhealthyHostPorts = make(map[string]time.Time) + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func (b *healthBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *healthBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + addrs, hostPorts := b.liveAddrs() + + var waitDown bool + if pinAddr != "" { + _, ok := hostPorts[pinAddr] + waitDown = !ok + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *healthBalancer) Up(addr grpc.Address) func(error) { + if !b.mayPin(addr) { + return func(err error) {} + } + + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Otherwise, will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {} + } + + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. 
+ if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {} + } + + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {} + } + + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. + b.hostPortError(addr.Addr, err) + + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + } +} + +func (b *healthBalancer) mayPin(addr grpc.Address) bool { + if b.endpoint(addr.Addr) == "" { // stale host:port + return false + } + + b.unhealthyMu.RLock() + unhealthyCnt := len(b.unhealthyHostPorts) + failedTime, bad := b.unhealthyHostPorts[addr.Addr] + b.unhealthyMu.RUnlock() + + b.mu.RLock() + skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt + b.mu.RUnlock() + if skip || !bad { + return true + } + + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + } + return false + } + + if ok, _ := b.healthCheck(addr.Addr); ok { + b.removeUnhealthy(addr.Addr, "health check success") + return true + } + + b.hostPortError(addr.Addr, errors.New("health check failed")) + return false +} + +func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. 
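+		// (Close unblocks the wait above by closing b.upc; see Close below)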
+ if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *healthBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + b.stopOnce.Do(func() { close(b.stopc) }) + b.pinAddr = "" + + // In the case of following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + b.wg.Wait() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} + +func getHostPort2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/black_hole_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/black_hole_test.go new file mode 100644 index 000000000000..ac372062cce0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/integration/black_hole_test.go @@ -0,0 +1,211 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !cluster_proxy + +package integration + +import ( + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/integration" + "github.com/coreos/etcd/pkg/testutil" + + "golang.org/x/net/context" +) + +// TestBalancerUnderBlackholeKeepAliveWatch tests when watch discovers it cannot talk to +// blackholed endpoint, client balancer switches to healthy one. +// TODO: test server-to-client keepalive ping +func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 2, + GRPCKeepAliveMinTime: 1 * time.Millisecond, // avoid too_many_pings + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()} + + ccfg := clientv3.Config{ + Endpoints: []string{eps[0]}, + DialTimeout: 1 * time.Second, + DialKeepAliveTime: 1 * time.Second, + DialKeepAliveTimeout: 500 * time.Millisecond, + } + + // gRPC internal implementation related. + pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout + // 3s for slow machine to process watch and reset connections + // TODO: only send healthy endpoint to gRPC so gRPC wont waste time to + // dial for unhealthy endpoint. + // then we can reduce 3s to 1s. + timeout := pingInterval + 3*time.Second + + cli, err := clientv3.New(ccfg) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify()) + if _, ok := <-wch; !ok { + t.Fatalf("watch failed on creation") + } + + // endpoint can switch to eps[1] when it detects the failure of eps[0] + cli.SetEndpoints(eps...) + + clus.Members[0].Blackhole() + + if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil { + t.Fatal(err) + } + select { + case <-wch: + case <-time.After(timeout): + t.Error("took too long to receive watch events") + } + + clus.Members[0].Unblackhole() + + // waiting for moving eps[0] out of unhealthy, so that it can be re-pined. + time.Sleep(ccfg.DialTimeout) + + clus.Members[1].Blackhole() + + // make sure client[0] can connect to eps[0] after remove the blackhole. + if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil { + t.Fatal(err) + } + if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil { + t.Fatal(err) + } + + select { + case <-wch: + case <-time.After(timeout): + t.Error("took too long to receive watch events") + } +} + +func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) { + testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Put(ctx, "foo", "bar") + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }) +} + +func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) { + testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Delete(ctx, "foo") + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }) +} + +func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) { + testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Txn(ctx). + If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). + Then(clientv3.OpPut("foo", "bar")). 
+ Else(clientv3.OpPut("foo", "baz")).Commit() + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }) +} + +func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) { + testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "a") + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }) +} + +func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) { + testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "a", clientv3.WithSerializable()) + if err == context.DeadlineExceeded { + return errExpected + } + return err + }) +} + +// testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint +// fails due to context timeout, but succeeds on next try, with endpoint switch. +func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 2, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()} + + ccfg := clientv3.Config{ + Endpoints: []string{eps[0]}, + DialTimeout: 1 * time.Second, + } + cli, err := clientv3.New(ccfg) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for eps[0] to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + cli.SetEndpoints(eps...) + + // blackhole eps[0] + clus.Members[0].Blackhole() + + // fail first due to blackhole, retry should succeed + // TODO: first operation can succeed + // when gRPC supports better retry on non-delivered request + for i := 0; i < 2; i++ { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err = op(cli, ctx) + cancel() + if err == nil { + break + } + if i == 0 { + if err != errExpected { + t.Errorf("#%d: expected %v, got %v", i, errExpected, err) + } + } else if err != nil { + t.Errorf("#%d: failed with error %v", i, err) + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/cluster_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/cluster_test.go index 6397f8a8a264..bf2bf788d07d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/cluster_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/cluster_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/types" + "golang.org/x/net/context" ) @@ -126,3 +127,36 @@ func TestMemberUpdate(t *testing.T) { t.Errorf("urls = %v, want %v", urls, resp.Members[0].PeerURLs) } } + +func TestMemberAddUpdateWrongURLs(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + capi := clus.RandClient() + tt := [][]string{ + // missing protocol scheme + {"://127.0.0.1:2379"}, + // unsupported scheme + {"mailto://127.0.0.1:2379"}, + // not conform to host:port + {"http://127.0.0.1"}, + // contain a path + {"http://127.0.0.1:2379/path"}, + // first path segment in URL cannot contain colon + {"127.0.0.1:1234"}, + // URL scheme must be http, https, unix, or unixs + {"localhost:1234"}, + } + for i := range tt { + _, err 
:= capi.MemberAdd(context.Background(), tt[i]) + if err == nil { + t.Errorf("#%d: MemberAdd err = nil, but error", i) + } + _, err = capi.MemberUpdate(context.Background(), 0, tt[i]) + if err == nil { + t.Errorf("#%d: MemberUpdate err = nil, but error", i) + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/dial_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/dial_test.go index 4d8075de2592..aba0e2eea3a4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/dial_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/dial_test.go @@ -16,6 +16,7 @@ package integration import ( "math/rand" + "strings" "testing" "time" @@ -26,7 +27,6 @@ import ( "github.com/coreos/etcd/pkg/transport" "golang.org/x/net/context" - "google.golang.org/grpc" ) var ( @@ -48,21 +48,21 @@ var ( // TestDialTLSExpired tests client with expired certs fails to dial. func TestDialTLSExpired(t *testing.T) { defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) tls, err := testTLSInfoExpired.ClientConfig() if err != nil { t.Fatal(err) } - // expect remote errors 'tls: bad certificate' + // expect remote errors "tls: bad certificate" _, err = clientv3.New(clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCAddr()}, DialTimeout: 3 * time.Second, TLS: tls, }) - if err != grpc.ErrClientConnTimeout { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err) + if err != context.DeadlineExceeded { + t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err) } } @@ -70,19 +70,20 @@ func TestDialTLSExpired(t *testing.T) { // when TLS endpoints (https, unixs) are given but no tls config. func TestDialTLSNoConfig(t *testing.T) { defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true}) defer clus.Terminate(t) - // expect 'signed by unknown authority' + // expect "signed by unknown authority" _, err := clientv3.New(clientv3.Config{ Endpoints: []string{clus.Members[0].GRPCAddr()}, DialTimeout: time.Second, }) - if err != grpc.ErrClientConnTimeout { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err) + if err != context.DeadlineExceeded { + t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err) } } -// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. +// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable +// endpoints with available ones. func TestDialSetEndpointsBeforeFail(t *testing.T) { testDialSetEndpoints(t, true) } @@ -94,7 +95,7 @@ func TestDialSetEndpointsAfterFail(t *testing.T) { // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. 
func testDialSetEndpoints(t *testing.T, setBefore bool) { defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true}) defer clus.Terminate(t) // get endpoint list @@ -139,7 +140,7 @@ func TestSwitchSetEndpoints(t *testing.T) { eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} cli := clus.Client(0) - clus.Members[0].InjectPartition(t, clus.Members[1:]) + clus.Members[0].InjectPartition(t, clus.Members[1:]...) cli.SetEndpoints(eps...) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -152,7 +153,7 @@ func TestSwitchSetEndpoints(t *testing.T) { func TestRejectOldCluster(t *testing.T) { defer testutil.AfterTest(t) // 2 endpoints to test multi-endpoint Status - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ @@ -182,10 +183,24 @@ func TestDialForeignEndpoint(t *testing.T) { // grpc can return a lazy connection that's not connected yet; confirm // that it can communicate with the cluster. - kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn)) + kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0)) ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) defer cancel() if _, gerr := kvc.Get(ctx, "abc"); gerr != nil { t.Fatal(err) } } + +// TestSetEndpointAndPut checks that a Put following a SetEndpoints +// to a working endpoint will always succeed. +func TestSetEndpointAndPut(t *testing.T) { + defer testutil.AfterTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) + defer clus.Terminate(t) + + clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr()) + _, err := clus.Client(1).Put(context.TODO(), "foo", "bar") + if err != nil && !strings.Contains(err.Error(), "closing") { + t.Fatal(err) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/kv_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/kv_test.go index 49f107f76df0..3acfe09a11c3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/kv_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/kv_test.go @@ -16,7 +16,6 @@ package integration import ( "bytes" - "math/rand" "os" "reflect" "strings" @@ -28,8 +27,10 @@ import ( "github.com/coreos/etcd/integration" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) func TestKVPutError(t *testing.T) { @@ -39,7 +40,7 @@ func TestKVPutError(t *testing.T) { maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go quota = int64(int(maxReqBytes) + 8*os.Getpagesize()) ) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) defer clus.Terminate(t) kv := clus.RandClient() @@ -441,8 +442,8 @@ func TestKVGetErrConnClosed(t *testing.T) { go func() { defer close(donec) _, err := cli.Get(context.TODO(), "foo") - if err != nil && err != grpc.ErrClientConnClosing { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err) + if err != nil && err != context.Canceled && err != grpc.ErrClientConnClosing { + t.Fatalf("expected %v or %v, 
got %v", context.Canceled, grpc.ErrClientConnClosing, err) } }() @@ -472,8 +473,9 @@ func TestKVNewAfterClose(t *testing.T) { donec := make(chan struct{}) go func() { - if _, err := cli.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err) + _, err := cli.Get(context.TODO(), "foo") + if err != context.Canceled && err != grpc.ErrClientConnClosing { + t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err) } close(donec) }() @@ -790,7 +792,7 @@ func TestKVGetStoppedServerAndClose(t *testing.T) { // this Get fails and triggers an asynchronous connection retry _, err := cli.Get(ctx, "abc") cancel() - if !strings.Contains(err.Error(), "context deadline") { + if err != nil && err != context.DeadlineExceeded { t.Fatal(err) } } @@ -812,86 +814,143 @@ func TestKVPutStoppedServerAndClose(t *testing.T) { // grpc finds out the original connection is down due to the member shutdown. _, err := cli.Get(ctx, "abc") cancel() - if !strings.Contains(err.Error(), "context deadline") { + if err != nil && err != context.DeadlineExceeded { t.Fatal(err) } // this Put fails and triggers an asynchronous connection retry _, err = cli.Put(ctx, "abc", "123") cancel() - if !strings.Contains(err.Error(), "context deadline") { + if err != nil && err != context.DeadlineExceeded { t.Fatal(err) } } -// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down -func TestKVPutOneEndpointDown(t *testing.T) { +// TestKVPutAtMostOnce ensures that a Put will only occur at most once +// in the presence of network errors. +func TestKVPutAtMostOnce(t *testing.T) { defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - // get endpoint list - eps := make([]string, 3) - for i := range eps { - eps[i] = clus.Members[i].GRPCAddr() + if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { + t.Fatal(err) } - // make a dead node - clus.Members[rand.Intn(len(eps))].Stop(t) + for i := 0; i < 10; i++ { + clus.Members[0].DropConnections() + donec := make(chan struct{}) + go func() { + defer close(donec) + for i := 0; i < 10; i++ { + clus.Members[0].DropConnections() + time.Sleep(5 * time.Millisecond) + } + }() + _, err := clus.Client(0).Put(context.TODO(), "k", "v") + <-donec + if err != nil { + break + } + } - // try to connect with dead node in the endpoint list - cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second} - cli, err := clientv3.New(cfg) + resp, err := clus.Client(0).Get(context.TODO(), "k") if err != nil { t.Fatal(err) } - defer cli.Close() - ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second) - if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil { - t.Fatal(err) + if resp.Kvs[0].Version > 11 { + t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) } - cancel() } -// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other -// endpoints are down, then it will reconnect. -func TestKVGetResetLoneEndpoint(t *testing.T) { +// TestKVLargeRequests tests various client/server side request limits. 
+func TestKVLargeRequests(t *testing.T) { defer testutil.AfterTest(t) - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) - defer clus.Terminate(t) + tests := []struct { + // make sure that "MaxCallSendMsgSize" < server-side default send/recv limit + maxRequestBytesServer uint + maxCallSendBytesClient int + maxCallRecvBytesClient int - // get endpoint list - eps := make([]string, 2) - for i := range eps { - eps[i] = clus.Members[i].GRPCAddr() - } + valueSize int + expectError error + }{ + { + maxRequestBytesServer: 1, + maxCallSendBytesClient: 0, + maxCallRecvBytesClient: 0, + valueSize: 1024, + expectError: rpctypes.ErrRequestTooLarge, + }, - cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond} - cli, err := clientv3.New(cfg) - if err != nil { - t.Fatal(err) + // without proper client-side receive size limit + // "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 4194304)" + { + + maxRequestBytesServer: 7*1024*1024 + 512*1024, + maxCallSendBytesClient: 7 * 1024 * 1024, + maxCallRecvBytesClient: 0, + valueSize: 5 * 1024 * 1024, + expectError: nil, + }, + + { + maxRequestBytesServer: 10 * 1024 * 1024, + maxCallSendBytesClient: 100 * 1024 * 1024, + maxCallRecvBytesClient: 0, + valueSize: 10 * 1024 * 1024, + expectError: rpctypes.ErrRequestTooLarge, + }, + { + maxRequestBytesServer: 10 * 1024 * 1024, + maxCallSendBytesClient: 10 * 1024 * 1024, + maxCallRecvBytesClient: 0, + valueSize: 10 * 1024 * 1024, + expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max "), + }, + { + maxRequestBytesServer: 10 * 1024 * 1024, + maxCallSendBytesClient: 100 * 1024 * 1024, + maxCallRecvBytesClient: 0, + valueSize: 10*1024*1024 + 5, + expectError: rpctypes.ErrRequestTooLarge, + }, + { + maxRequestBytesServer: 10 * 1024 * 1024, + maxCallSendBytesClient: 10 * 1024 * 1024, + maxCallRecvBytesClient: 0, + valueSize: 10*1024*1024 + 5, + expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max "), + }, } - defer cli.Close() + for i, test := range tests { + clus := integration.NewClusterV3(t, + &integration.ClusterConfig{ + Size: 1, + MaxRequestBytes: test.maxRequestBytesServer, + ClientMaxCallSendMsgSize: test.maxCallSendBytesClient, + ClientMaxCallRecvMsgSize: test.maxCallRecvBytesClient, + }, + ) + cli := clus.Client(0) + _, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize)) - // disconnect everything - clus.Members[0].Stop(t) - clus.Members[1].Stop(t) + if _, ok := err.(rpctypes.EtcdError); ok { + if err != test.expectError { + t.Errorf("#%d: expected %v, got %v", i, test.expectError, err) + } + } else if err != nil && !strings.HasPrefix(err.Error(), test.expectError.Error()) { + t.Errorf("#%d: expected %v, got %v", i, test.expectError, err) + } - // have Get try to reconnect - donec := make(chan struct{}) - go func() { - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil { - t.Fatal(err) + // put request went through, now expects large response back + if err == nil { + _, err = cli.Get(context.TODO(), "foo") + if err != nil { + t.Errorf("#%d: get expected no error, got %v", i, err) + } } - cancel() - close(donec) - }() - time.Sleep(500 * time.Millisecond) - clus.Members[0].Restart(t) - select { - case <-time.After(10 * time.Second): - t.Fatalf("timed out waiting for Get") - case <-donec: + + clus.Terminate(t) } } diff --git 
a/vendor/github.com/coreos/etcd/clientv3/integration/lease_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/lease_test.go index ef176fddfbb3..d209eb0e5615 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/lease_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/lease_test.go @@ -26,6 +26,7 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -233,7 +234,7 @@ type leaseCh struct { ch <-chan *clientv3.LeaseKeepAliveResponse } -// TestLeaseKeepAliveNotFound ensures a revoked lease won't stop other keep alives +// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases. func TestLeaseKeepAliveNotFound(t *testing.T) { defer testutil.AfterTest(t) @@ -286,8 +287,10 @@ func TestLeaseGrantErrConnClosed(t *testing.T) { go func() { defer close(donec) _, err := cli.Grant(context.TODO(), 5) - if err != nil && err != grpc.ErrClientConnClosing { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err) + if err != nil && err != grpc.ErrClientConnClosing && err != context.Canceled { + // grpc.ErrClientConnClosing if grpc-go balancer calls 'Get' after client.Close. + // context.Canceled if grpc-go balancer calls 'Get' with an inflight client.Close. + t.Fatalf("expected %v or %v, got %v", grpc.ErrClientConnClosing, context.Canceled, err) } }() @@ -316,8 +319,8 @@ func TestLeaseGrantNewAfterClose(t *testing.T) { donec := make(chan struct{}) go func() { - if _, err := cli.Grant(context.TODO(), 5); err != grpc.ErrClientConnClosing { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err) + if _, err := cli.Grant(context.TODO(), 5); err != context.Canceled && err != grpc.ErrClientConnClosing { + t.Fatalf("expected %v or %v, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err) } close(donec) }() @@ -348,8 +351,8 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { donec := make(chan struct{}) go func() { - if _, err := cli.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err) + if _, err := cli.Revoke(context.TODO(), leaseID); err != context.Canceled && err != grpc.ErrClientConnClosing { + t.Fatalf("expected %v or %v, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err) } close(donec) }() @@ -360,7 +363,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) { } } -// TestLeaseKeepAliveCloseAfterDisconnectExpire ensures the keep alive channel is closed +// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed // following a disconnection, lease revoke, then reconnect. 
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { defer testutil.AfterTest(t) @@ -395,7 +398,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { clus.Members[0].Restart(t) - // some keep-alives may still be buffered; drain until close + // some responses may still be buffered; drain until close timer := time.After(time.Duration(kresp.TTL) * time.Second) for kresp != nil { select { @@ -482,7 +485,8 @@ func TestLeaseTimeToLive(t *testing.T) { clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - lapi := clus.RandClient() + c := clus.RandClient() + lapi := c resp, err := lapi.Grant(context.Background(), 10) if err != nil { @@ -497,6 +501,11 @@ func TestLeaseTimeToLive(t *testing.T) { } } + // linearized read to ensure Puts propagated to server backing lapi + if _, err := c.Get(context.TODO(), "abc"); err != nil { + t.Fatal(err) + } + lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys()) if lerr != nil { t.Fatal(lerr) @@ -545,8 +554,7 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { } lresp, err := cli.TimeToLive(context.Background(), resp.ID) - // TimeToLive() doesn't return LeaseNotFound error - // but return a response with TTL to be -1 + // TimeToLive() should return a response with TTL=-1. if err != nil { t.Fatalf("expected err to be nil") } @@ -636,8 +644,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) { } } -// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster -// before, during, and after quorum loss to confirm Grant/Keepalive tolerates +// TestV3LeaseFailureOverlap issues Grant and KeepAlive requests to a cluster +// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates // transient cluster failure. 
func TestV3LeaseFailureOverlap(t *testing.T) { clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/logger_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/logger_test.go index 71922da38089..bc0220db4b71 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/logger_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/logger_test.go @@ -14,8 +14,16 @@ package integration -import "github.com/coreos/pkg/capnslog" +import ( + "io/ioutil" + + "github.com/coreos/etcd/clientv3" + + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc/grpclog" +) func init() { - capnslog.SetGlobalLogLevel(capnslog.INFO) + capnslog.SetGlobalLogLevel(capnslog.CRITICAL) + clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) } diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/metrics_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/metrics_test.go index 6311fe0d77f2..dafb43dcae58 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/metrics_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/metrics_test.go @@ -44,7 +44,7 @@ func TestV3ClientMetrics(t *testing.T) { err error ) - // listen for all prometheus metrics + // listen for all Prometheus metrics donec := make(chan struct{}) go func() { defer close(donec) @@ -65,7 +65,7 @@ func TestV3ClientMetrics(t *testing.T) { url := "unix://" + addr + "/metrics" - clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true}) defer clus.Terminate(t) cfg := clientv3.Config{ diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/mirror_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/mirror_test.go index df9911efb7ea..fe2c5d0f739e 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/mirror_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/mirror_test.go @@ -25,6 +25,7 @@ import ( "github.com/coreos/etcd/integration" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/namespace_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/namespace_test.go index b952d333d5b2..e83344a440d3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/namespace_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/namespace_test.go @@ -15,7 +15,6 @@ package integration import ( - "context" "reflect" "testing" @@ -24,6 +23,8 @@ import ( "github.com/coreos/etcd/integration" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/testutil" + + "golang.org/x/net/context" ) func TestNamespacePutGet(t *testing.T) { diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/network_partition_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/network_partition_test.go new file mode 100644 index 000000000000..7241a9c54c06 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/integration/network_partition_test.go @@ -0,0 +1,260 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !cluster_proxy + +package integration + +import ( + "errors" + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/integration" + "github.com/coreos/etcd/pkg/testutil" + + "golang.org/x/net/context" +) + +var errExpected = errors.New("expected error") + +// TestBalancerUnderNetworkPartitionPut tests when one member becomes isolated, +// first Put request fails, and following retry succeeds with client balancer +// switching to others. +func TestBalancerUnderNetworkPartitionPut(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Put(ctx, "a", "b") + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }, time.Second) +} + +func TestBalancerUnderNetworkPartitionDelete(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Delete(ctx, "a") + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }, time.Second) +} + +func TestBalancerUnderNetworkPartitionTxn(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Txn(ctx). + If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). + Then(clientv3.OpPut("foo", "bar")). + Else(clientv3.OpPut("foo", "baz")).Commit() + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout { + return errExpected + } + return err + }, time.Second) +} + +// TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout tests +// when one member becomes isolated, first quorum Get request succeeds +// by switching endpoints within the timeout (long enough to cover endpoint switch). +func TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "a") + return err + }, 7*time.Second) +} + +// TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout tests +// when one member becomes isolated, first quorum Get request fails, +// and following retry succeeds with client balancer switching to others. 
+func TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "a") + if err == context.DeadlineExceeded { + return errExpected + } + return err + }, time.Second) +} + +func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) { + testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "a", clientv3.WithSerializable()) + return err + }, time.Second) +} + +func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + // expect pin eps[0] + ccfg := clientv3.Config{ + Endpoints: []string{eps[0]}, + DialTimeout: 3 * time.Second, + } + cli, err := clientv3.New(ccfg) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for eps[0] to be pinned + mustWaitPinReady(t, cli) + + // add other endpoints for later endpoint switch + cli.SetEndpoints(eps...) + clus.Members[0].InjectPartition(t, clus.Members[1:]...) + + for i := 0; i < 2; i++ { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + err = op(cli, ctx) + cancel() + if err == nil { + break + } + if err != errExpected { + t.Errorf("#%d: expected %v, got %v", i, errExpected, err) + } + // give enough time for endpoint switch + // TODO: remove random sleep by syncing directly with balancer + if i == 0 { + time.Sleep(5 * time.Second) + } + } + if err != nil { + t.Errorf("balancer did not switch in time (%v)", err) + } +} + +// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures balancer +// switches endpoint when leader fails and linearizable get requests returns +// "etcdserver: request timed out". 
+func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + lead := clus.WaitLeader(t) + + timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{eps[(lead+1)%2]}, + DialTimeout: 1 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for non-leader to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + cli.SetEndpoints(eps[lead], eps[(lead+1)%2]) + + // isolate leader + clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3]) + + // expects balancer endpoint switch while ongoing leader election + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + _, err = cli.Get(ctx, "a") + cancel() + if err != nil { + t.Fatal(err) + } +} + +func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { + testBalancerUnderNetworkPartitionWatch(t, true) +} + +func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) { + testBalancerUnderNetworkPartitionWatch(t, false) +} + +// testBalancerUnderNetworkPartitionWatch ensures watch stream +// to a partitioned node be closed when context requires leader. +func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + target := clus.WaitLeader(t) + if !isolateLeader { + target = (target + 1) % 3 + } + + // pin eps[target] + watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}}) + if err != nil { + t.Fatal(err) + } + defer watchCli.Close() + + // wait for eps[target] to be pinned + mustWaitPinReady(t, watchCli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + watchCli.SetEndpoints(eps...) 
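+	// a WithRequireLeader watch on the pinned member should be closed with
+	// rpctypes.ErrNoLeader once that member is partitioned from the quorum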
+ + wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify()) + select { + case <-wch: + case <-time.After(3 * time.Second): + t.Fatal("took too long to create watch") + } + + // isolate eps[target] + clus.Members[target].InjectPartition(t, + clus.Members[(target+1)%3], + clus.Members[(target+2)%3], + ) + + select { + case ev := <-wch: + if len(ev.Events) != 0 { + t.Fatal("expected no event") + } + if err = ev.Err(); err != rpctypes.ErrNoLeader { + t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err) + } + case <-time.After(3 * time.Second): // enough time to detect leader lost + t.Fatal("took too long to detect leader lost") + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/role_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/role_test.go index 7814db5ebd64..ea1b63c8be2b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/role_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/role_test.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/server_shutdown_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/server_shutdown_test.go new file mode 100644 index 000000000000..13e91c6855e5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/integration/server_shutdown_test.go @@ -0,0 +1,352 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package integration + +import ( + "bytes" + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/integration" + "github.com/coreos/etcd/pkg/testutil" + + "golang.org/x/net/context" +) + +// TestBalancerUnderServerShutdownWatch expects that watch client +// switch its endpoints when the member of the pinned endpoint fails. +func TestBalancerUnderServerShutdownWatch(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + lead := clus.WaitLeader(t) + + // pin eps[lead] + watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[lead]}}) + if err != nil { + t.Fatal(err) + } + defer watchCli.Close() + + // wait for eps[lead] to be pinned + mustWaitPinReady(t, watchCli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + watchCli.SetEndpoints(eps...) 
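+	// watch through the client pinned to the leader; the event for the Put below
+	// must still arrive after the leader is terminated and the balancer switches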
+ + key, val := "foo", "bar" + wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify()) + select { + case <-wch: + case <-time.After(3 * time.Second): + t.Fatal("took too long to create watch") + } + + donec := make(chan struct{}) + go func() { + defer close(donec) + + // switch to others when eps[lead] is shut down + select { + case ev := <-wch: + if werr := ev.Err(); werr != nil { + t.Fatal(werr) + } + if len(ev.Events) != 1 { + t.Fatalf("expected one event, got %+v", ev) + } + if !bytes.Equal(ev.Events[0].Kv.Value, []byte(val)) { + t.Fatalf("expected %q, got %+v", val, ev.Events[0].Kv) + } + case <-time.After(7 * time.Second): + t.Fatal("took too long to receive events") + } + }() + + // shut down eps[lead] + clus.Members[lead].Terminate(t) + + // writes to eps[lead+1] + putCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) + if err != nil { + t.Fatal(err) + } + defer putCli.Close() + for { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + _, err = putCli.Put(ctx, key, val) + cancel() + if err == nil { + break + } + if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail { + continue + } + t.Fatal(err) + } + + select { + case <-donec: + case <-time.After(5 * time.Second): // enough time for balancer switch + t.Fatal("took too long to receive events") + } +} + +func TestBalancerUnderServerShutdownPut(t *testing.T) { + testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Put(ctx, "foo", "bar") + return err + }) +} + +func TestBalancerUnderServerShutdownDelete(t *testing.T) { + testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Delete(ctx, "foo") + return err + }) +} + +func TestBalancerUnderServerShutdownTxn(t *testing.T) { + testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Txn(ctx). + If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). + Then(clientv3.OpPut("foo", "bar")). + Else(clientv3.OpPut("foo", "baz")).Commit() + return err + }) +} + +// testBalancerUnderServerShutdownMutable expects that when the member of +// the pinned endpoint is shut down, the balancer switches its endpoints +// and all subsequent put/delete/txn requests succeed with new endpoints. +func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + // pin eps[0] + cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}}) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // wait for eps[0] to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + cli.SetEndpoints(eps...) + + // shut down eps[0] + clus.Members[0].Terminate(t) + + // switched to others when eps[0] was explicitly shut down + // and following request should succeed + // TODO: remove this (expose client connection state?) 
+ time.Sleep(time.Second) + + cctx, ccancel := context.WithTimeout(context.Background(), time.Second) + err = op(cli, cctx) + ccancel() + if err != nil { + t.Fatal(err) + } +} + +func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) { + testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "foo") + return err + }, 7*time.Second) // give enough time for leader election, balancer switch +} + +func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) { + testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error { + _, err := cli.Get(ctx, "foo", clientv3.WithSerializable()) + return err + }, 2*time.Second) +} + +// testBalancerUnderServerShutdownImmutable expects that when the member of +// the pinned endpoint is shut down, the balancer switches its endpoints +// and all subsequent range requests succeed with new endpoints. +func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{ + Size: 3, + SkipCreatingClient: true, + }) + defer clus.Terminate(t) + + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()} + + // pin eps[0] + cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}}) + if err != nil { + t.Errorf("failed to create client: %v", err) + } + defer cli.Close() + + // wait for eps[0] to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + cli.SetEndpoints(eps...) + + // shut down eps[0] + clus.Members[0].Terminate(t) + + // switched to others when eps[0] was explicitly shut down + // and following request should succeed + cctx, ccancel := context.WithTimeout(context.Background(), timeout) + err = op(cli, cctx) + ccancel() + if err != nil { + t.Errorf("failed to finish range request in time %v (timeout %v)", err, timeout) + } +} + +func TestBalancerUnderServerStopInflightLinearizableGetOnRestart(t *testing.T) { + tt := []pinTestOpt{ + {pinLeader: true, stopPinFirst: true}, + {pinLeader: true, stopPinFirst: false}, + {pinLeader: false, stopPinFirst: true}, + {pinLeader: false, stopPinFirst: false}, + } + for i := range tt { + testBalancerUnderServerStopInflightRangeOnRestart(t, true, tt[i]) + } +} + +func TestBalancerUnderServerStopInflightSerializableGetOnRestart(t *testing.T) { + tt := []pinTestOpt{ + {pinLeader: true, stopPinFirst: true}, + {pinLeader: true, stopPinFirst: false}, + {pinLeader: false, stopPinFirst: true}, + {pinLeader: false, stopPinFirst: false}, + } + for i := range tt { + testBalancerUnderServerStopInflightRangeOnRestart(t, false, tt[i]) + } +} + +type pinTestOpt struct { + pinLeader bool + stopPinFirst bool +} + +// testBalancerUnderServerStopInflightRangeOnRestart expects +// inflight range request reconnects on server restart. 
+func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) { + defer testutil.AfterTest(t) + + cfg := &integration.ClusterConfig{ + Size: 2, + SkipCreatingClient: true, + } + if linearizable { + cfg.Size = 3 + } + + clus := integration.NewClusterV3(t, cfg) + defer clus.Terminate(t) + eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()} + if linearizable { + eps = append(eps, clus.Members[2].GRPCAddr()) + } + + lead := clus.WaitLeader(t) + + target := lead + if !opt.pinLeader { + target = (target + 1) % 2 + } + + // pin eps[target] + cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}}) + if err != nil { + t.Errorf("failed to create client: %v", err) + } + defer cli.Close() + + // wait for eps[target] to be pinned + mustWaitPinReady(t, cli) + + // add all eps to list, so that when the original pined one fails + // the client can switch to other available eps + cli.SetEndpoints(eps...) + + if opt.stopPinFirst { + clus.Members[target].Stop(t) + // give some time for balancer switch before stopping the other + time.Sleep(time.Second) + clus.Members[(target+1)%2].Stop(t) + } else { + clus.Members[(target+1)%2].Stop(t) + // balancer cannot pin other member since it's already stopped + clus.Members[target].Stop(t) + } + + // 3-second is the minimum interval between endpoint being marked + // as unhealthy and being removed from unhealthy, so possibly + // takes >5-second to unpin and repin an endpoint + // TODO: decrease timeout when balancer switch rewrite + clientTimeout := 7 * time.Second + + var gops []clientv3.OpOption + if !linearizable { + gops = append(gops, clientv3.WithSerializable()) + } + + donec, readyc := make(chan struct{}), make(chan struct{}, 1) + go func() { + defer close(donec) + ctx, cancel := context.WithTimeout(context.TODO(), clientTimeout) + readyc <- struct{}{} + _, err := cli.Get(ctx, "abc", gops...) 
+ cancel() + if err != nil { + t.Fatal(err) + } + }() + + <-readyc + clus.Members[target].Restart(t) + + select { + case <-time.After(clientTimeout + 3*time.Second): + t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt) + case <-donec: + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/txn_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/txn_test.go index b5cdea7e64f6..774410450a0a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/txn_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/txn_test.go @@ -24,6 +24,7 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" ) @@ -100,6 +101,8 @@ func TestTxnWriteFail(t *testing.T) { } func TestTxnReadRetry(t *testing.T) { + t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request") + defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) @@ -129,7 +132,6 @@ func TestTxnReadRetry(t *testing.T) { t.Fatalf("waited too long") } } - func TestTxnSuccess(t *testing.T) { defer testutil.AfterTest(t) diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/user_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/user_test.go index 09f352be0f1e..50e6fdb63cc9 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/user_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/user_test.go @@ -21,6 +21,7 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" ) @@ -62,7 +63,7 @@ func TestUserErrorAuth(t *testing.T) { authapi := clus.RandClient() authSetupRoot(t, authapi.Auth) - // un-authenticated client + // unauthenticated client if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserNotFound { t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/util.go b/vendor/github.com/coreos/etcd/clientv3/integration/util.go new file mode 100644 index 000000000000..b2712c93a319 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/integration/util.go @@ -0,0 +1,35 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package integration + +import ( + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +// mustWaitPinReady waits up to 3-second until connection is up (pin endpoint). +// Fatal on time-out. 
+func mustWaitPinReady(t *testing.T, cli *clientv3.Client) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := cli.Get(ctx, "foo") + cancel() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/integration/watch_test.go b/vendor/github.com/coreos/etcd/clientv3/integration/watch_test.go index 4b2fa18858b3..d92433b67332 100644 --- a/vendor/github.com/coreos/etcd/clientv3/integration/watch_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/integration/watch_test.go @@ -28,8 +28,10 @@ import ( "github.com/coreos/etcd/integration" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/testutil" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) type watcherTest func(*testing.T, *watchctx) @@ -51,8 +53,8 @@ func runWatchTest(t *testing.T, f watcherTest) { wclientMember := rand.Intn(3) w := clus.Client(wclientMember).Watcher - // select a different client from wclient so puts succeed if - // a test knocks out the watcher client + // select a different client for KV operations so puts succeed if + // a test knocks out the watcher client. kvMember := rand.Intn(3) for kvMember == wclientMember { kvMember = rand.Intn(3) @@ -309,7 +311,7 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { select { case <-time.After(time.Second): t.Fatalf("took too long to cancel") - case v, ok := <-wctx.ch: + case _, ok := <-wctx.ch: if !ok { // closed before getting put; OK break @@ -318,8 +320,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) { select { case <-time.After(time.Second): t.Fatalf("took too long to close") - case v, ok = <-wctx.ch: - if ok { + case v, ok2 := <-wctx.ch: + if ok2 { t.Fatalf("expected watcher channel to close, got %v", v) } } @@ -800,7 +802,8 @@ func TestWatchWithFilter(t *testing.T) { } } -// TestWatchWithCreatedNotification checks that createdNotification works. +// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a +// Created watch response. func TestWatchWithCreatedNotification(t *testing.T) { cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer cluster.Terminate(t) @@ -837,8 +840,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) { cluster.Members[0].DropConnections() - // try to receive from watch channel again - // ensure it doesn't post another createNotify + // check watch channel doesn't post another watch response. select { case wresp := <-wch: t.Fatalf("got unexpected watch response: %+v\n", wresp) @@ -856,10 +858,26 @@ func TestWatchCancelOnServer(t *testing.T) { client := cluster.RandClient() numWatches := 10 + // The grpc proxy starts watches to detect leadership after the proxy server + // returns as started; to avoid racing on the proxy's internal watches, wait + // until require leader watches get create responses to ensure the leadership + // watches have started. 
+ for { + ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO())) + ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify()) + wresp := <-ww + cancel() + if wresp.Err() == nil { + break + } + } + cancels := make([]context.CancelFunc, numWatches) for i := 0; i < numWatches; i++ { - // use WithTimeout to force separate streams in client - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + // force separate streams in client + md := metadata.Pairs("some-key", fmt.Sprintf("%d", i)) + mctx := metadata.NewOutgoingContext(context.Background(), md) + ctx, cancel := context.WithCancel(mctx) cancels[i] = cancel w := client.Watch(ctx, fmt.Sprintf("%d", i), clientv3.WithCreatedNotify()) <-w @@ -885,7 +903,7 @@ func TestWatchCancelOnServer(t *testing.T) { t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr) } - if maxWatchV-minWatchV != numWatches { + if maxWatchV-minWatchV < numWatches { t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV) } } @@ -916,12 +934,12 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) defer clus.Terminate(t) - // each unique context "%v" has a unique grpc stream n := 100 ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5) for i := range ctxs { - // make "%v" unique - ctxs[i] = context.WithValue(context.TODO(), "key", i) + // make unique stream + md := metadata.Pairs("some-key", fmt.Sprintf("%d", i)) + ctxs[i] = metadata.NewOutgoingContext(context.Background(), md) // limits the maximum number of outstanding watchers per stream ctxc[i] = make(chan struct{}, 2) } @@ -946,7 +964,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) t.Fatalf("unexpected closed channel %p", wch) } // may take a second or two to reestablish a watcher because of - // grpc backoff policies for disconnects + // grpc back off policies for disconnects case <-time.After(5 * time.Second): t.Errorf("timed out waiting for watch on %p", wch) } @@ -970,7 +988,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) } } -// TestWatchCanelAndCloseClient ensures that canceling a watcher then immediately +// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately // closing the client does not return a client closing error. 
func TestWatchCancelAndCloseClient(t *testing.T) { defer testutil.AfterTest(t) diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index dc45fa4ad06d..6289605c8e05 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -16,6 +16,7 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -66,22 +67,46 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} type kv struct { - remote pb.KVClient + remote pb.KVClient + callOpts []grpc.CallOption } func NewKV(c *Client) KV { - return &kv{remote: RetryKVClient(c)} + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api } -func NewKVFromKVClient(remote pb.KVClient) KV { - return &kv{remote: remote} +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { @@ -100,7 +125,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -109,54 +134,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C func (kv *kv) Txn(ctx context.Context) Txn { return &txn{ - kv: kv, - ctx: ctx, + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, } } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp *pb.PutResponse r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - resp, err = kv.remote.Put(ctx, r) + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) 
if err == nil { return OpResponse{put: (*PutResponse)(resp)}, nil } case tDeleteRange: var resp *pb.DeleteRangeResponse r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r) + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } - return OpResponse{}, err + return OpResponse{}, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index 6e7ea1f82d08..e74e1d6b549f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -30,7 +31,7 @@ type ( LeaseID int64 ) -// LeaseGrantResponse is used to convert the protobuf grant response. +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. type LeaseGrantResponse struct { *pb.ResponseHeader ID LeaseID @@ -38,14 +39,14 @@ type LeaseGrantResponse struct { Error string } -// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. type LeaseKeepAliveResponse struct { *pb.ResponseHeader ID LeaseID TTL int64 } -// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. type LeaseTimeToLiveResponse struct { *pb.ResponseHeader ID LeaseID `json:"id"` @@ -60,6 +61,12 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `json:"keys"` } +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -101,7 +108,7 @@ type Lease interface { // KeepAlive keeps the given lease alive forever. KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - // KeepAliveOnce renews the lease once. In most of the cases, Keepalive + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive // should be used instead of KeepAliveOnce. KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) @@ -133,6 +140,8 @@ type lessor struct { // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -148,10 +157,10 @@ type keepAlive struct { } func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) } -func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { l := &lessor{ donec: make(chan struct{}), keepAlives: make(map[LeaseID]*keepAlive), @@ -161,62 +170,52 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } + if c != nil { + l.callOpts = c.callOpts + } reqLeaderCtx := WithRequireLeader(context.Background()) l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) return l } func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - for { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil } + return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - for { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -314,7 +313,7 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha } } -// closeRequireLeader scans all keep alives for ctxs that have require leader +// closeRequireLeader scans keepAlives for ctxs that have require leader // and closes the associated channels. 
func (l *lessor) closeRequireLeader() { l.mu.Lock() @@ -323,7 +322,7 @@ func (l *lessor) closeRequireLeader() { reqIdxs := 0 // find all required leader channels, close, mark as nil for i, ctx := range ka.ctxs { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromOutgoingContext(ctx) if !ok { continue } @@ -357,7 +356,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -386,7 +385,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.Close() + ka.close() } l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() @@ -401,7 +400,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { } else { for { resp, err := stream.Recv() - if err != nil { if canceledByCaller(l.stopCtx, err) { return err @@ -426,10 +424,10 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { } } -// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests +// resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) if err != nil { cancel() return nil, err @@ -467,7 +465,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.Close() + ka.close() return } @@ -497,7 +495,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.Close() + ka.close() delete(l.keepAlives, id) } } @@ -505,7 +503,7 @@ func (l *lessor) deadlineLoop() { } } -// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for { var tosend []LeaseID @@ -539,7 +537,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { } } -func (ka *keepAlive) Close() { +func (ka *keepAlive) close() { close(ka.donec) for _, ch := range ka.chs { close(ch) diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 519db45d8e34..012abdbce639 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -16,36 +16,35 @@ package clientv3 import ( "io/ioutil" - "log" "sync" "google.golang.org/grpc/grpclog" ) // Logger is the logger used by client library. -// It implements grpclog.Logger interface. -type Logger grpclog.Logger +// It implements grpclog.LoggerV2 interface. 
+type Logger grpclog.LoggerV2 var ( logger settableLogger ) type settableLogger struct { - l grpclog.Logger + l grpclog.LoggerV2 mu sync.RWMutex } func init() { // disable client side logs by default logger.mu.Lock() - logger.l = log.New(ioutil.Discard, "", 0) + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) // logger has to override the grpclog at initialization so that // any changes to the grpclog go through logger with locking // instead of through SetLogger // // now updates only happen through settableLogger.set - grpclog.SetLogger(&logger) + grpclog.SetLoggerV2(&logger) logger.mu.Unlock() } @@ -62,6 +61,7 @@ func GetLogger() Logger { func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l + grpclog.SetLoggerV2(&logger) s.mu.Unlock() } @@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger { return l } -// implement the grpclog.Logger interface +// implement the grpclog.LoggerV2 interface +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/main_test.go b/vendor/github.com/coreos/etcd/clientv3/main_test.go index eb5a7135b03c..89b30860b828 100644 --- a/vendor/github.com/coreos/etcd/clientv3/main_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/main_test.go @@ -32,17 +32,23 @@ func init() { auth.BcryptCost = bcrypt.MinCost } // TestMain sets up an etcd cluster if running the examples. 
func TestMain(m *testing.M) { - useCluster := true // default to running all tests + useCluster, hasRunArg := false, false // default to running only Test* for _, arg := range os.Args { if strings.HasPrefix(arg, "-test.run=") { exp := strings.Split(arg, "=")[1] match, err := regexp.MatchString(exp, "Example") useCluster = (err == nil && match) || strings.Contains(exp, "Example") + hasRunArg = true break } } + if !hasRunArg { + // force only running Test* if no args given to avoid leak false + // positives from having a long-running cluster for the examples. + os.Args = append(os.Args, "-test.run=Test") + } - v := 0 + var v int if useCluster { cfg := integration.ClusterConfig{Size: 3} clus := integration.NewClusterV3(nil, &cfg) diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 2a75b7e9c575..67b928fcfb3a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -37,7 +37,7 @@ type Maintenance interface { // AlarmDisarm disarms a given alarm. AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - // Defragment defragments storage backend of the etcd member with given endpoint. + // Defragment releases wasted space from internal fragmentation on a given etcd member. // Defragment is only needed when deleting a large number of keys and want to reclaim // the resources. // Defragment is an expensive operation. User should avoid defragmenting multiple members @@ -49,36 +49,45 @@ type Maintenance interface { // Status gets the status of the endpoint. Status(ctx context.Context, endpoint string) (*StatusResponse, error) - // Snapshot provides a reader for a snapshot of a backend. + // Snapshot provides a reader for a point-in-time snapshot of etcd. Snapshot(ctx context.Context) (io.ReadCloser, error) } type maintenance struct { - dial func(endpoint string) (pb.MaintenanceClient, func(), error) - remote pb.MaintenanceClient + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption } func NewMaintenance(c *Client) Maintenance { - return &maintenance{ + api := &maintenance{ dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { conn, err := c.dial(endpoint) if err != nil { return nil, nil, err } cancel := func() { conn.Close() } - return pb.NewMaintenanceClient(conn), cancel, nil + return RetryMaintenanceClient(c, conn), cancel, nil }, - remote: pb.NewMaintenanceClient(c.conn), + remote: RetryMaintenanceClient(c, c.conn), } + if c != nil { + api.callOpts = c.callOpts + } + return api } -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance { - return &maintenance{ +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ dial: func(string) (pb.MaintenanceClient, func(), error) { return remote, func() {}, nil }, remote: remote, } + if c != nil { + api.callOpts = c.callOpts + } + return api } func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { @@ -87,15 +96,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: pb.AlarmType_NONE, // all } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
+ if err == nil { + return (*AlarmResponse)(resp), nil } + return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -121,7 +126,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) if err == nil { return (*AlarmResponse)(resp), nil } @@ -134,7 +139,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm return nil, toErr(ctx, err) } defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -147,7 +152,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo return nil, toErr(ctx, err) } defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -155,7 +160,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/mirror/syncer.go b/vendor/github.com/coreos/etcd/clientv3/mirror/syncer.go index f2a8f107f678..8f302b2128e7 100644 --- a/vendor/github.com/coreos/etcd/clientv3/mirror/syncer.go +++ b/vendor/github.com/coreos/etcd/clientv3/mirror/syncer.go @@ -17,6 +17,7 @@ package mirror import ( "github.com/coreos/etcd/clientv3" + "golang.org/x/net/context" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go index f3e82d6b89a8..2b759e0d394a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go @@ -15,11 +15,11 @@ package namespace import ( - "golang.org/x/net/context" - "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" ) type kvPrefix struct { diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go index fc7c228699db..c3167fa5d876 100644 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go @@ -17,9 +17,9 @@ package namespace import ( "bytes" - "golang.org/x/net/context" - "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" ) type leasePrefix struct { diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go index 6257e2968295..9907211529a0 100644 --- a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go @@ -17,9 +17,9 @@ package namespace import ( "sync" - "golang.org/x/net/context" - "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" ) type watcherPrefix struct { diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go 
b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go index 9bb92ea61632..7fabc4f109ab 100644 --- a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go +++ b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go @@ -19,11 +19,12 @@ import ( "fmt" etcd "github.com/coreos/etcd/clientv3" - "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" ) var ErrWatcherClosed = fmt.Errorf("naming: watch closed") @@ -39,13 +40,13 @@ func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Upd case naming.Add: var v []byte if v, err = json.Marshal(nm); err != nil { - return grpc.Errorf(codes.InvalidArgument, err.Error()) + return status.Error(codes.InvalidArgument, err.Error()) } _, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...) case naming.Delete: _, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...) default: - return grpc.Errorf(codes.InvalidArgument, "naming: bad naming op") + return status.Error(codes.InvalidArgument, "naming: bad naming op") } return err } @@ -80,7 +81,7 @@ func (gw *gRPCWatcher) Next() ([]*naming.Update, error) { // process new events on target/* wr, ok := <-gw.wch if !ok { - gw.err = grpc.Errorf(codes.Unavailable, "%s", ErrWatcherClosed) + gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error()) return nil, gw.err } if gw.err = wr.Err(); gw.err != nil { diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc_test.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc_test.go index ad2d206626b4..55cff3837dc8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/naming/grpc_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/naming/grpc_test.go @@ -19,12 +19,12 @@ import ( "reflect" "testing" - "golang.org/x/net/context" - "google.golang.org/grpc/naming" - etcd "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + + "golang.org/x/net/context" + "google.golang.org/grpc/naming" ) func TestGRPCResolver(t *testing.T) { @@ -66,10 +66,13 @@ func TestGRPCResolver(t *testing.T) { delOp := naming.Update{Op: naming.Delete, Addr: "127.0.0.1"} err = r.Update(context.TODO(), "foo", delOp) + if err != nil { + t.Fatalf("failed to udpate %v", err) + } us, err = w.Next() if err != nil { - t.Fatal("failed to get udpate", err) + t.Fatalf("failed to get udpate %v", err) } wu = &naming.Update{ @@ -83,7 +86,7 @@ func TestGRPCResolver(t *testing.T) { } } -// TestGRPCResolverMultiInit ensures the resolver will initialize +// TestGRPCResolverMulti ensures the resolver will initialize // correctly with multiple hosts and correctly receive multiple // updates in a single revision. func TestGRPCResolverMulti(t *testing.T) { diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 63b99501e826..e18d28662c46 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,6 +23,7 @@ const ( tRange opType = iota + 1 tPut tDeleteRange + tTxn ) var ( @@ -67,9 +68,17 @@ type Op struct { // for put val []byte leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op } -// accesors / mutators +// accessors / mutators + +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } // KeyBytes returns the byte slice holding the Op's key. 
func (op Op) KeyBytes() []byte { return op.key } @@ -80,6 +89,39 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key } // RangeBytes returns the byte slice holding with the Op's range end, if any. func (op Op) RangeBytes() []byte { return op.end } +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable == true } + +// IsKeysOnly returns whether keysOnly is set. +func (op Op) IsKeysOnly() bool { return op.keysOnly == true } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly == true } + +// MinModRev returns the operation's minimum modify revision. +func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + // WithRangeBytes sets the byte slice for the Op's range end. func (op *Op) WithRangeBytes(end []byte) { op.end = end } @@ -113,6 +155,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: @@ -129,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp { } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -194,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -247,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption { if target == SortByKey && order == SortAscend { // If order != SortNone, server fetches the entire key-space, // and then applies the sort and limit, if provided. - // Since current mvcc.Range implementation returns results - // sorted by keys in lexicographically ascending order, - // client should ignore SortOrder if the target is SortByKey. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. 
order = SortNone } op.sort = &SortOption{target, order} @@ -390,7 +465,7 @@ func WithPrevKV() OpOption { } // WithIgnoreValue updates the key using its current value. -// Empty value should be passed when ignore_value is set. +// This option can not be combined with non-empty values. // Returns an error if the key does not exist. func WithIgnoreValue() OpOption { return func(op *Op) { @@ -399,7 +474,7 @@ func WithIgnoreValue() OpOption { } // WithIgnoreLease updates the key using its current lease. -// Empty lease should be passed when ignore_lease is set. +// This option can not be combined with WithLease. // Returns an error if the key does not exist. func WithIgnoreLease() OpOption { return func(op *Op) { @@ -424,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) { } } -// WithAttachedKeys requests lease timetolive API to return -// attached keys of given lease ID. +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. func WithAttachedKeys() LeaseOption { return func(op *LeaseOp) { op.attachedKeys = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 000000000000..23eea9367ff7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import "golang.org/x/net/context" + +// TODO: remove this when "FailFast=false" is fixed. +// See https://github.com/grpc/grpc-go/issues/1532. 
+func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 78f31a8c4b0c..c95b2cad7c4f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -17,135 +17,183 @@ package clientv3 import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool + +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + desc := rpctypes.ErrorDesc(err) + return desc != "there is no address available" && desc != "there is no connection available" +} -func (c *Client) newRetryWrapper() retryRpcFunc { +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { - return err + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) } - // only retry if unavailable - if grpc.Code(err) != codes.Unavailable { - return err + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } } - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() + if isStop(err) { + return err } } } } -func (c *Client) newAuthRetryWrapper() retryRpcFunc { +func (c *Client) newAuthRetryWrapper() retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } return err // return the original error for simplicity } continue } - 
return err } } } -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. +// RetryKVClient implements a KVClient. func RetryKVClient(c *Client) pb.KVClient { - retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} - return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} } type retryKVClient struct { - *retryWriteKVClient + *nonRepeatableKVClient + repeatableRetry retryRPCFunc } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) return err }) return resp, err } -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc } -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) return err }) return resp, err } type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc + lc pb.LeaseClient + repeatableRetry retryRPCFunc } -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. +// RetryLeaseClient implements a LeaseClient. func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} - return &retryLeaseClient{retry, c.retryAuthWrapper} + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) return err }) return resp, err @@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) return err }) return resp, err } +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc } -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. +// RetryClusterClient implements a ClusterClient. func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) + return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc } -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) + return err + }) + return stream, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc } -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. +// RetryAuthClient implements a AuthClient. func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) + return err + }) + return resp, err } -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) 
return err }) return resp, err } -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) 
return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index 7bde6fd7253c..1a80c8ebaabe 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -18,13 +18,14 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. // -// Tx.If( +// Txn(context.TODO()).If( // Compare(Value(k1), ">", v1), // Compare(Version(k1), "=", 2) // ).Then( @@ -66,6 +67,8 @@ type txn struct { sus []*pb.RequestOp fas []*pb.RequestOp + + callOpts []grpc.CallOption } func (txn *txn) If(cs ...Cmp) Txn { @@ -135,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} -func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) if err != nil { - return nil, err + return nil, toErr(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index a707f7449487..16a91fdff404 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,9 +22,12 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const ( @@ -40,10 +43,9 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. 
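The reworded Watcher documentation spells out the compaction behavior concretely: if the requested revision has already been compacted, the server cancels the watch, the client delivers one response whose Err() is the compacted error, and the channel closes. A minimal usage sketch under those semantics (endpoint, key, and starting revision are hypothetical; resuming from CompactRevision is one reasonable reaction, not the only one):

    package main

    import (
        "context"
        "log"

        "github.com/coreos/etcd/clientv3"
        "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()

        rev := int64(1) // hypothetical, possibly already-compacted revision
        for {
            wch := cli.Watch(context.Background(), "foo", clientv3.WithRev(rev))
            for wresp := range wch {
                if err := wresp.Err(); err == rpctypes.ErrCompacted {
                    // the server canceled the watch; resume from the first
                    // revision that still exists
                    rev = wresp.CompactRevision
                    break
                } else if err != nil {
                    log.Fatal(err)
                }
                for _, ev := range wresp.Events {
                    log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
                    rev = ev.Kv.ModRevision + 1
                }
            }
            // channel closed (compaction, cancellation, or client shutdown);
            // the outer loop re-opens the watch from the tracked revision
        }
    }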
@@ -90,7 +92,7 @@ func (wr *WatchResponse) Err() error { return v3rpc.ErrCompacted case wr.Canceled: if len(wr.cancelReason) != 0 { - return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason)) + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) } return v3rpc.ErrFutureRev } @@ -104,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool { // watcher implements the Watcher interface type watcher struct { - remote pb.WatchClient + remote pb.WatchClient + callOpts []grpc.CallOption // mu protects the grpc streams map mu sync.RWMutex @@ -115,8 +118,9 @@ type watcher struct { // watchGrpcStream tracks all watch resources attached to a single grpc stream. type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption // ctx controls internal remote.Watch requests ctx context.Context @@ -135,7 +139,7 @@ type watchGrpcStream struct { respc chan *pb.WatchResponse // donec closes to broadcast shutdown donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconn logic + // errc transmits errors from grpc Recv to the watch stream reconnect logic errc chan error // closingc gets the watcherStream of closing watchers closingc chan *watcherStream @@ -187,14 +191,18 @@ type watcherStream struct { } func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) } -func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { - return &watcher{ +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ remote: wc, streams: make(map[string]*watchGrpcStream), } + if c != nil { + w.callOpts = c.callOpts + } + return w } // never closes @@ -213,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { wgs := &watchGrpcStream{ owner: w, remote: w.remote, + callOpts: w.callOpts, ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), + ctxKey: streamKeyFromCtx(inctx), cancel: cancel, substreams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -254,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } ok := false - ctxKey := fmt.Sprintf("%v", ctx) + ctxKey := streamKeyFromCtx(ctx) // find or allocate appropriate grpc watch stream w.mu.Lock() @@ -317,14 +325,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { + if werr := wgs.close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) Close() (err error) { +func (w *watchGrpcStream) close() (err error) { w.cancel() <-w.donec select { @@ -435,7 +443,7 @@ func (w *watchGrpcStream) run() { initReq: *wreq, id: -1, outc: outc, - // unbufffered so resumes won't cause repeat events + // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), } @@ -487,7 +495,7 @@ func (w *watchGrpcStream) run() { req := &pb.WatchRequest{RequestUnion: cr} wc.Send(req) } - // watch client failed to 
recv; spawn another if possible + // watch client failed on Recv; spawn another if possible case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -749,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str return donec } -// joinSubstream waits for all substream goroutines to complete +// joinSubstreams waits for all substream goroutines to complete. func (w *watchGrpcStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec @@ -761,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() { } } -// openWatchClient retries opening a watchclient until retryConnection fails +// openWatchClient retries opening a watch client until success or halt. +// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { @@ -772,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return nil, err default: } - if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { break } if isHaltErr(w.ctx, err) { @@ -782,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ StartRevision: wr.rev, @@ -795,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} } + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/clientv3/yaml/config.go b/vendor/github.com/coreos/etcd/clientv3/yaml/config.go index aa6df1c18304..7539da294c1b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/yaml/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/yaml/config.go @@ -33,7 +33,11 @@ type yamlConfig struct { InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` Certfile string `json:"cert-file"` Keyfile string `json:"key-file"` - CAfile string `json:"ca-file"` + TrustedCAfile string `json:"trusted-ca-file"` + + // CAfile is being deprecated. Use 'TrustedCAfile' instead. + // TODO: deprecate this in v4 + CAfile string `json:"ca-file"` } // NewConfig creates a new clientv3.Config from a yaml file. 
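With the field rename, trusted-ca-file becomes the preferred YAML key and ca-file is kept only for backward compatibility; the hunk below copies CAfile into TrustedCAfile when the new key is unset. A hedged loading sketch (the file name and its contents are hypothetical):

    package main

    import (
        "log"

        "github.com/coreos/etcd/clientv3"
        yamlcfg "github.com/coreos/etcd/clientv3/yaml"
    )

    func main() {
        // client.yaml (hypothetical contents):
        //   endpoints: ["https://127.0.0.1:2379"]
        //   cert-file: client.crt
        //   key-file: client.key
        //   trusted-ca-file: ca.crt   # replaces the deprecated ca-file key
        cfg, err := yamlcfg.NewConfig("client.yaml")
        if err != nil {
            log.Fatal(err)
        }
        cli, err := clientv3.New(*cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()
    }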
@@ -66,8 +70,11 @@ func NewConfig(fpath string) (*clientv3.Config, error) { } } - if yc.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) + if yc.CAfile != "" && yc.TrustedCAfile == "" { + yc.TrustedCAfile = yc.CAfile + } + if yc.TrustedCAfile != "" { + cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile}) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/clientv3/yaml/config_test.go b/vendor/github.com/coreos/etcd/clientv3/yaml/config_test.go index 3dc221fdb7e7..bcf28b434570 100644 --- a/vendor/github.com/coreos/etcd/clientv3/yaml/config_test.go +++ b/vendor/github.com/coreos/etcd/clientv3/yaml/config_test.go @@ -50,7 +50,7 @@ func TestConfigFromFile(t *testing.T) { &yamlConfig{ Keyfile: privateKeyPath, Certfile: certPath, - CAfile: caPath, + TrustedCAfile: caPath, InsecureSkipTLSVerify: true, }, false, @@ -64,9 +64,9 @@ func TestConfigFromFile(t *testing.T) { }, { &yamlConfig{ - Keyfile: privateKeyPath, - Certfile: certPath, - CAfile: "bad", + Keyfile: privateKeyPath, + Certfile: certPath, + TrustedCAfile: "bad", }, true, }, @@ -113,7 +113,7 @@ func TestConfigFromFile(t *testing.T) { if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 { t.Errorf("#%d: failed to load in cert", i) } - if tt.ym.CAfile != "" && cfg.TLS.RootCAs == nil { + if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil { t.Errorf("#%d: failed to load in ca cert", i) } if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify { diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go index 270287072d81..5cf7b65094a3 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -105,7 +105,7 @@ func (t *Periodic) Run() { last = clock.Now() plog.Noticef("Finished auto-compaction at revision %d", rev) } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) plog.Noticef("Retry after %v", checkCompactionInterval) } } diff --git a/vendor/github.com/coreos/etcd/e2e/docker/Dockerfile b/vendor/github.com/coreos/etcd/e2e/docker/Dockerfile index c94e1612a2ae..e2708a7df366 100644 --- a/vendor/github.com/coreos/etcd/e2e/docker/Dockerfile +++ b/vendor/github.com/coreos/etcd/e2e/docker/Dockerfile @@ -1,12 +1,23 @@ -FROM golang:1.8.3-stretch -LABEL Description="Image for etcd DNS testing" -RUN apt update -y -RUN go get github.com/mattn/goreman -RUN apt install -y bind9 -RUN mkdir /var/bind -RUN chown bind /var/bind -ADD Procfile.tls /Procfile.tls -ADD run.sh /run.sh -ADD named.conf etcd.zone rdns.zone /etc/bind/ -ADD resolv.conf /etc/resolv.conf -CMD ["/run.sh"] \ No newline at end of file +FROM golang:1.8.4-stretch + +RUN apt-get -y update +RUN apt-get -y install \ + netcat \ + libaspell-dev \ + libhunspell-dev \ + hunspell-en-us \ + aspell-en \ + shellcheck + +RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd +WORKDIR ${GOPATH}/src/github.com/coreos/etcd + +ADD ./scripts/install-marker.sh ./scripts/install-marker.sh + +RUN go get -v -u -tags spell github.com/chzchzchz/goword \ + && go get -v -u github.com/coreos/license-bill-of-materials \ + && go get -v -u honnef.co/go/tools/cmd/gosimple \ + && go get -v -u honnef.co/go/tools/cmd/unused \ + && go get -v -u honnef.co/go/tools/cmd/staticcheck \ + && go get -v -u github.com/wadey/gocovmerge \ + && ./scripts/install-marker.sh amd64 diff --git a/vendor/github.com/coreos/etcd/embed/config.go 
b/vendor/github.com/coreos/etcd/embed/config.go index b274bc68ec8b..90efb3937d79 100644 --- a/vendor/github.com/coreos/etcd/embed/config.go +++ b/vendor/github.com/coreos/etcd/embed/config.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "strings" + "time" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/pkg/cors" @@ -37,9 +38,13 @@ const ( ClusterStateFlagNew = "new" ClusterStateFlagExisting = "existing" - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second DefaultListenPeerURLs = "http://localhost:2380" DefaultListenClientURLs = "http://localhost:2379" @@ -85,6 +90,24 @@ type Config struct { TickMs uint `json:"heartbeat-interval"` ElectionMs uint `json:"election-timeout"` QuotaBackendBytes int64 `json:"quota-backend-bytes"` + MaxRequestBytes uint `json:"max-request-bytes"` + + // gRPC server options + + // GRPCKeepAliveMinTime is the minimum interval that a client should + // wait before pinging server. When client pings "too fast", server + // sends goaway and closes the connection (errors: too_many_pings, + // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. + // Server expects client pings only when there is any active streams + // (PermitWithoutStream is set false). + GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` + // GRPCKeepAliveInterval is the frequency of server-to-client ping + // to check if a connection is alive. Close a non-responsive connection + // after an additional duration of Timeout. 0 to disable. + GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` + // GRPCKeepAliveTimeout is the additional duration of wait + // before closing a non-responsive connection. 0 to disable. 
+ GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` // clustering @@ -167,23 +190,27 @@ func NewConfig() *Config { lcurl, _ := url.Parse(DefaultListenClientURLs) acurl, _ := url.Parse(DefaultAdvertiseClientURLs) cfg := &Config{ - CorsInfo: &cors.CORSInfo{}, - MaxSnapFiles: DefaultMaxSnapshots, - MaxWalFiles: DefaultMaxWALs, - Name: DefaultName, - SnapCount: etcdserver.DefaultSnapCount, - TickMs: 100, - ElectionMs: 1000, - LPUrls: []url.URL{*lpurl}, - LCUrls: []url.URL{*lcurl}, - APUrls: []url.URL{*apurl}, - ACUrls: []url.URL{*acurl}, - ClusterState: ClusterStateFlagNew, - InitialClusterToken: "etcd-cluster", - StrictReconfigCheck: true, - Metrics: "basic", - EnableV2: true, - AuthToken: "simple", + CorsInfo: &cors.CORSInfo{}, + MaxSnapFiles: DefaultMaxSnapshots, + MaxWalFiles: DefaultMaxWALs, + Name: DefaultName, + SnapCount: etcdserver.DefaultSnapCount, + MaxRequestBytes: DefaultMaxRequestBytes, + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + TickMs: 100, + ElectionMs: 1000, + LPUrls: []url.URL{*lpurl}, + LCUrls: []url.URL{*lcurl}, + APUrls: []url.URL{*apurl}, + ACUrls: []url.URL{*acurl}, + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + StrictReconfigCheck: true, + Metrics: "basic", + EnableV2: true, + AuthToken: "simple", } cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) return cfg diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go index 73c452e62a2d..2f500f9f13b7 100644 --- a/vendor/github.com/coreos/etcd/embed/etcd.go +++ b/vendor/github.com/coreos/etcd/embed/etcd.go @@ -29,13 +29,18 @@ import ( "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/etcdserver/api/v3rpc" "github.com/coreos/etcd/pkg/cors" "github.com/coreos/etcd/pkg/debugutil" runtimeutil "github.com/coreos/etcd/pkg/runtime" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + + "github.com/cockroachdb/cmux" "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") @@ -58,12 +63,14 @@ const ( type Etcd struct { Peers []*peerListener Clients []net.Listener - Server *etcdserver.EtcdServer + // a map of contexts for the servers that serves client requests. 
+ sctxs map[string]*serveCtx + + Server *etcdserver.EtcdServer cfg Config stopc chan struct{} errc chan error - sctxs map[string]*serveCtx closeOnce sync.Once } @@ -89,9 +96,9 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { return } if !serving { - // errored before starting gRPC server for serveCtx.grpcServerC + // errored before starting gRPC server for serveCtx.serversC for _, sctx := range e.sctxs { - close(sctx.grpcServerC) + close(sctx.serversC) } } e.Close() @@ -99,10 +106,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { }() if e.Peers, err = startPeerListeners(cfg); err != nil { - return + return e, err } if e.sctxs, err = startClientListeners(cfg); err != nil { - return + return e, err } for _, sctx := range e.sctxs { e.Clients = append(e.Clients, sctx.l) @@ -140,43 +147,31 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { ElectionTicks: cfg.ElectionTicks(), AutoCompactionRetention: cfg.AutoCompactionRetention, QuotaBackendBytes: cfg.QuotaBackendBytes, + MaxRequestBytes: cfg.MaxRequestBytes, StrictReconfigCheck: cfg.StrictReconfigCheck, ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, AuthToken: cfg.AuthToken, + Debug: cfg.Debug, } if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { - return - } - - // configure peer handlers after rafthttp.Transport started - ph := etcdhttp.NewPeerHandler(e.Server) - for _, p := range e.Peers { - srv := &http.Server{ - Handler: ph, - ReadTimeout: 5 * time.Minute, - ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error - } - - l := p.Listener - p.serve = func() error { return srv.Serve(l) } - p.close = func(ctx context.Context) error { - // gracefully shutdown http.Server - // close open listeners, idle connections - // until context cancel or time-out - return srv.Shutdown(ctx) - } + return e, err } // buffer channel so goroutines on closed connections won't wait forever e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) e.Server.Start() - if err = e.serve(); err != nil { - return + + if err = e.servePeers(); err != nil { + return e, err + } + if err = e.serveClients(); err != nil { + return e, err } + serving = true - return + return e, nil } // Config returns the current configuration. @@ -184,38 +179,29 @@ func (e *Etcd) Config() Config { return e.cfg } +// Close gracefully shuts down all servers/listeners. +// Client requests will be terminated with request timeout. +// After timeout, enforce remaning requests be closed immediately. func (e *Etcd) Close() { e.closeOnce.Do(func() { close(e.stopc) }) + // close client requests with request timeout timeout := 2 * time.Second if e.Server != nil { timeout = e.Server.Cfg.ReqTimeout() } for _, sctx := range e.sctxs { - for gs := range sctx.grpcServerC { - ch := make(chan struct{}) - go func() { - defer close(ch) - // close listeners to stop accepting new connections, - // will block on any existing transports - gs.GracefulStop() - }() - // wait until all pending RPCs are finished - select { - case <-ch: - case <-time.After(timeout): - // took too long, manually close open transports - // e.g. 
watch streams - gs.Stop() - // concurrent GracefulStop should be interrupted - <-ch - } + for ss := range sctx.serversC { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + stopServers(ctx, ss) + cancel() } } for _, sctx := range e.sctxs { sctx.cancel() } + for i := range e.Clients { if e.Clients[i] != nil { e.Clients[i].Close() @@ -237,6 +223,43 @@ func (e *Etcd) Close() { } } +func stopServers(ctx context.Context, ss *servers) { + shutdownNow := func() { + // first, close the http.Server + ss.http.Shutdown(ctx) + // then close grpc.Server; cancels all active RPCs + ss.grpc.Stop() + } + + // do not grpc.Server.GracefulStop with TLS enabled etcd server + // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 + // and https://github.com/coreos/etcd/issues/8916 + if ss.secure { + shutdownNow() + return + } + + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + ss.grpc.GracefulStop() + }() + + // wait until all pending RPCs are finished + select { + case <-ch: + case <-ctx.Done(): + // took too long, manually close open transports + // e.g. watch streams + shutdownNow() + + // concurrent GracefulStop should be interrupted + <-ch + } +} + func (e *Etcd) Err() <-chan error { return e.errc } func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { @@ -265,7 +288,9 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { for i := range peers { if peers[i] != nil && peers[i].close != nil { plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) - peers[i].close(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + peers[i].close(ctx) + cancel() } } }() @@ -293,6 +318,45 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { return peers, nil } +// configure peer handlers after rafthttp.Transport started +func (e *Etcd) servePeers() (err error) { + ph := etcdhttp.NewPeerHandler(e.Server) + var peerTLScfg *tls.Config + if !e.cfg.PeerTLSInfo.Empty() { + if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { + return err + } + } + + for _, p := range e.Peers { + gs := v3rpc.Server(e.Server, peerTLScfg) + m := cmux.New(p.Listener) + go gs.Serve(m.Match(cmux.HTTP2())) + srv := &http.Server{ + Handler: grpcHandlerFunc(gs, ph), + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + go srv.Serve(m.Match(cmux.Any())) + p.serve = func() error { return m.Serve() } + p.close = func(ctx context.Context) error { + // gracefully shutdown http.Server + // close open listeners, idle connections + // until context cancel or time-out + stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) + return nil + } + } + + // start peer servers in a goroutine + for _, pl := range e.Peers { + go func(l *peerListener) { + e.errHandler(l.serve()) + }(pl) + } + return nil +} + func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { chosts := make([]string, len(cfg.LCUrls)) @@ -384,7 +448,7 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { return sctxs, nil } -func (e *Etcd) serve() (err error) { +func (e *Etcd) serveClients() (err error) { var ctlscfg *tls.Config if !e.cfg.ClientTLSInfo.Empty() { plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) @@ -397,13 +461,6 @@ func (e 
*Etcd) serve() (err error) { plog.Infof("cors = %s", e.cfg.CorsInfo) } - // Start the peer server in a goroutine - for _, pl := range e.Peers { - go func(l *peerListener) { - e.errHandler(l.serve()) - }(pl) - } - // Start a client server goroutine for each listen address var h http.Handler if e.Config().EnableV2 { @@ -415,9 +472,25 @@ func (e *Etcd) serve() (err error) { } h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) + gopts := []grpc.ServerOption{} + if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: e.cfg.GRPCKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && + e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: e.cfg.GRPCKeepAliveInterval, + Timeout: e.cfg.GRPCKeepAliveTimeout, + })) + } + + // start client servers in a goroutine for _, sctx := range e.sctxs { go func(s *serveCtx) { - e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler)) + e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...)) }(sctx) } return nil diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go index 602074c78457..b659bf8b7d68 100644 --- a/vendor/github.com/coreos/etcd/embed/serve.go +++ b/vendor/github.com/coreos/etcd/embed/serve.go @@ -53,20 +53,34 @@ type serveCtx struct { userHandlers map[string]http.Handler serviceRegister func(*grpc.Server) - grpcServerC chan *grpc.Server + serversC chan *servers +} + +type servers struct { + secure bool + grpc *grpc.Server + http *http.Server } func newServeCtx() *serveCtx { ctx, cancel := context.WithCancel(context.Background()) - return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler), - grpcServerC: make(chan *grpc.Server, 2), // in case sctx.insecure,sctx.secure true + return &serveCtx{ + ctx: ctx, + cancel: cancel, + userHandlers: make(map[string]http.Handler), + serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true } } // serve accepts incoming connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. -func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handler http.Handler, errHandler func(error)) error { +func (sctx *serveCtx) serve( + s *etcdserver.EtcdServer, + tlscfg *tls.Config, + handler http.Handler, + errHandler func(error), + gopts ...grpc.ServerOption) error { logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) <-s.ReadyNotify() plog.Info("ready to serve client requests") @@ -77,8 +91,7 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle servLock := v3lock.NewLockServer(v3c) if sctx.insecure { - gs := v3rpc.Server(s, nil) - sctx.grpcServerC <- gs + gs := v3rpc.Server(s, nil, gopts...) 
v3electionpb.RegisterElectionServer(gs, servElection) v3lockpb.RegisterLockServer(gs, servLock) if sctx.serviceRegister != nil { @@ -87,9 +100,7 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle grpcl := m.Match(cmux.HTTP2()) go func() { errHandler(gs.Serve(grpcl)) }() - opts := []grpc.DialOption{ - grpc.WithInsecure(), - } + opts := []grpc.DialOption{grpc.WithInsecure()} gwmux, err := sctx.registerGateway(opts) if err != nil { return err @@ -103,12 +114,13 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle } httpl := m.Match(cmux.HTTP1()) go func() { errHandler(srvhttp.Serve(httpl)) }() + + sctx.serversC <- &servers{grpc: gs, http: srvhttp} plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) } if sctx.secure { - gs := v3rpc.Server(s, tlscfg) - sctx.grpcServerC <- gs + gs := v3rpc.Server(s, tlscfg, gopts...) v3electionpb.RegisterElectionServer(gs, servElection) v3lockpb.RegisterLockServer(gs, servLock) if sctx.serviceRegister != nil { @@ -137,10 +149,11 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle } go func() { errHandler(srv.Serve(tlsl)) }() + sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} plog.Infof("serving client requests on %s", sctx.l.Addr().String()) } - close(sctx.grpcServerC) + close(sctx.serversC) return m.Serve() } diff --git a/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/global.go b/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/global.go index 616d32e21ef7..39ab96453225 100644 --- a/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/global.go +++ b/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/global.go @@ -19,7 +19,6 @@ import ( "errors" "io" "io/ioutil" - "log" "os" "strings" "time" @@ -29,6 +28,7 @@ import ( "github.com/coreos/etcd/pkg/flags" "github.com/coreos/etcd/pkg/transport" "github.com/spf13/cobra" + "google.golang.org/grpc/grpclog" ) // GlobalFlags are flags that defined globally @@ -88,7 +88,9 @@ func mustClientFromCmd(cmd *cobra.Command) *clientv3.Client { ExitWithError(ExitError, derr) } if debug { - clientv3.SetLogger(log.New(os.Stderr, "grpc: ", 0)) + clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)) + } else { + clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) } endpoints, err := cmd.Flags().GetStringSlice("endpoints") diff --git a/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go b/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go index eb8630fd132c..db12df50afb3 100644 --- a/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go +++ b/vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go @@ -27,7 +27,7 @@ import ( "reflect" "strings" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" diff --git a/vendor/github.com/coreos/etcd/etcdmain/config.go b/vendor/github.com/coreos/etcd/etcdmain/config.go index b8732200ae60..cb211fcf7f38 100644 --- a/vendor/github.com/coreos/etcd/etcdmain/config.go +++ b/vendor/github.com/coreos/etcd/etcdmain/config.go @@ -138,6 +138,10 @@ func newConfig() *config { fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.") fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in 
milliseconds) for an election to timeout.") fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.") + fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.") + fs.DurationVar(&cfg.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.Config.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.") + fs.DurationVar(&cfg.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.Config.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).") + fs.DurationVar(&cfg.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.Config.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") // clustering fs.Var(flags.NewURLsValue(embed.DefaultInitialAdvertisePeerURLs), "initial-advertise-peer-urls", "List of this member's peer URLs to advertise to the rest of the cluster.") diff --git a/vendor/github.com/coreos/etcd/etcdmain/etcd.go b/vendor/github.com/coreos/etcd/etcdmain/etcd.go index 2f7f00d61ad7..a70b2502c733 100644 --- a/vendor/github.com/coreos/etcd/etcdmain/etcd.go +++ b/vendor/github.com/coreos/etcd/etcdmain/etcd.go @@ -356,11 +356,22 @@ func identifyDataDirOrDie(dir string) dirType { return dirEmpty } -func setupLogging(cfg *config) { - cfg.ClientTLSInfo.HandshakeFailure = func(conn *tls.Conn, err error) { - plog.Infof("rejected connection from %q (%v)", conn.RemoteAddr().String(), err) +func logTLSHandshakeFailure(conn *tls.Conn, err error) { + state := conn.ConnectionState() + remoteAddr := conn.RemoteAddr().String() + serverName := state.ServerName + if len(state.PeerCertificates) > 0 { + cert := state.PeerCertificates[0] + ips, dns := cert.IPAddresses, cert.DNSNames + plog.Infof("rejected connection from %q (error %q, ServerName %q, IPAddresses %q, DNSNames %q)", remoteAddr, err.Error(), serverName, ips, dns) + } else { + plog.Infof("rejected connection from %q (error %q, ServerName %q)", remoteAddr, err.Error(), serverName) } - cfg.PeerTLSInfo.HandshakeFailure = cfg.ClientTLSInfo.HandshakeFailure +} + +func setupLogging(cfg *config) { + cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure + cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure capnslog.SetGlobalLogLevel(capnslog.INFO) if cfg.Debug { diff --git a/vendor/github.com/coreos/etcd/etcdmain/help.go b/vendor/github.com/coreos/etcd/etcdmain/help.go index cd9282a3192d..70e30d6d8038 100644 --- a/vendor/github.com/coreos/etcd/etcdmain/help.go +++ b/vendor/github.com/coreos/etcd/etcdmain/help.go @@ -66,6 +66,14 @@ member flags: comma-separated whitelist of origins for CORS (cross-origin resource sharing). --quota-backend-bytes '0' raise alarms when backend size exceeds the given quota (0 defaults to low space quota). + --max-request-bytes '1572864' + maximum client request size in bytes the server will accept. + --grpc-keepalive-min-time '5s' + minimum duration interval that a client should wait before pinging server. + --grpc-keepalive-interval '2h' + frequency duration of server-to-client ping to check if a connection is alive (0 to disable). + --grpc-keepalive-timeout '20s' + additional duration of wait before closing a non-responsive connection (0 to disable). 
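These four help entries map one-to-one onto the embed.Config fields added earlier in this patch (MaxRequestBytes and the three GRPCKeepAlive settings). A minimal embedded-server sketch that sets them explicitly to their documented defaults (the data directory is hypothetical):

    package main

    import (
        "log"
        "time"

        "github.com/coreos/etcd/embed"
    )

    func main() {
        cfg := embed.NewConfig()
        cfg.Dir = "default.etcd"                    // hypothetical data directory
        cfg.MaxRequestBytes = 1.5 * 1024 * 1024     // --max-request-bytes
        cfg.GRPCKeepAliveMinTime = 5 * time.Second  // --grpc-keepalive-min-time
        cfg.GRPCKeepAliveInterval = 2 * time.Hour   // --grpc-keepalive-interval
        cfg.GRPCKeepAliveTimeout = 20 * time.Second // --grpc-keepalive-timeout

        e, err := embed.StartEtcd(cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer e.Close()

        select {
        case <-e.Server.ReadyNotify():
            log.Println("embedded etcd is ready")
        case err := <-e.Err():
            log.Fatal(err)
        }
        <-e.Server.StopNotify() // block until the server stops
    }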
clustering flags: diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go index cc4147d2f0c8..c0c07c8d767d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go @@ -32,19 +32,19 @@ func New(s *etcdserver.EtcdServer) *clientv3.Client { c := clientv3.NewCtxClient(context.Background()) kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s)) - c.KV = clientv3.NewKVFromKVClient(kvc) + c.KV = clientv3.NewKVFromKVClient(kvc, c) lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s)) - c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second) + c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second) wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) - c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)} + c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)} mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) - c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc) + c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c) clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s)) - c.Cluster = clientv3.NewClusterFromClusterClient(clc) + c.Cluster = clientv3.NewClusterFromClusterClient(clc, c) // TODO: implement clientv3.Auth interface? diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go index eb4b68c0a47a..ac00cbea9837 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: etcdserver/api/v3election/v3electionpb/v3election.proto -// DO NOT EDIT! /* Package v3electionpb is a reverse proxy. 
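The constructor changes in v3client.go thread the parent *clientv3.Client into each sub-client so that call options travel with it; the package still serves its original purpose of exposing a running embedded server as an ordinary client with no network hop. A hedged sketch of that use (data directory and keys are hypothetical):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/coreos/etcd/embed"
        "github.com/coreos/etcd/etcdserver/api/v3client"
    )

    func main() {
        cfg := embed.NewConfig()
        cfg.Dir = "default.etcd" // hypothetical data directory
        e, err := embed.StartEtcd(cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer e.Close()
        <-e.Server.ReadyNotify()

        // In-process client backed directly by the EtcdServer; with this patch
        // it is wired through the same NewXFromXClient(..., c) constructors as
        // a networked client.
        cli := v3client.New(e.Server)
        defer cli.Close()

        if _, err := cli.Put(context.TODO(), "sample_key", "sample_value"); err != nil {
            log.Fatal(err)
        }
        resp, err := cli.Get(context.TODO(), "sample_key")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s=%s\n", resp.Kvs[0].Key, resp.Kvs[0].Value)
    }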
@@ -21,10 +20,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -33,7 +34,7 @@ func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -46,7 +47,7 @@ func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -59,7 +60,7 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -72,7 +73,7 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.Observe(ctx, &protoReq) @@ -93,7 +94,7 @@ func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -129,7 +130,15 @@ func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.Serve // RegisterElectionHandler registers the http handlers for service Election to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := v3electionpb.NewElectionClient(conn) + return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn)) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ElectionClient" to call the correct interceptors. 
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -144,18 +153,19 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Election_Campaign_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -172,18 +182,19 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Election_Proclaim_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -200,18 +211,19 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Election_Leader_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -228,18 +240,19 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Election_Observe_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -256,18 +269,19 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Election_Resign_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go index 2600756e300b..92acb1469e95 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -32,6 +32,8 @@ import ( mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -67,6 +69,27 @@ func (m *CampaignRequest) String() string { return proto.CompactTextS func (*CampaignRequest) ProtoMessage() {} func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } +func (m *CampaignRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *CampaignRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *CampaignRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + type CampaignResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // leader describes the resources used for holding leadereship of the election. 
@@ -111,6 +134,34 @@ func (m *LeaderKey) String() string { return proto.CompactTextString( func (*LeaderKey) ProtoMessage() {} func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + type LeaderRequest struct { // name is the election identifier for the leadership information. Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -121,6 +172,13 @@ func (m *LeaderRequest) String() string { return proto.CompactTextStr func (*LeaderRequest) ProtoMessage() {} func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + type LeaderResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kv is the key-value pair representing the latest leader update. @@ -198,6 +256,13 @@ func (m *ProclaimRequest) GetLeader() *LeaderKey { return nil } +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + type ProclaimResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go index f31cc4014004..5aef4756dfe7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: etcdserver/api/v3lock/v3lockpb/v3lock.proto -// DO NOT EDIT! /* Package v3lockpb is a reverse proxy. 
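The regenerated message types gain the standard protobuf getters, which are nil-safe: calling them on a nil message returns the zero value instead of panicking. A small hypothetical sketch using the v3election types shown above:

    package main

    import (
        "fmt"

        epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
    )

    func main() {
        var req *epb.CampaignRequest // nil message, e.g. an unset field
        // Generated getters guard against nil receivers, so this is safe:
        fmt.Println(req.GetName() == nil, req.GetLease() == 0) // true true

        req = &epb.CampaignRequest{Name: []byte("my-election"), Lease: 7}
        fmt.Printf("%s %d\n", req.GetName(), req.GetLease()) // my-election 7
    }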
@@ -21,10 +20,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -33,7 +34,7 @@ func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, clien var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -46,7 +47,7 @@ func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, cli var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -82,7 +83,15 @@ func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterLockHandler registers the http handlers for service Lock to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := v3lockpb.NewLockClient(conn) + return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn)) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LockClient" to call the correct interceptors. +func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -97,18 +106,19 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Lock_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -125,18 +135,19 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lock_Unlock_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go index 44bde286b2f4..dcf2bad4019c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -25,6 +25,8 @@ import ( etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -59,6 +61,20 @@ func (m *LockRequest) String() string { return proto.CompactTextStrin func (*LockRequest) ProtoMessage() {} func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + type LockResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // key is a key that will exist on etcd for the duration that the Lock caller @@ -79,6 +95,13 @@ func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { return nil } +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + type UnlockRequest struct { // key is the lock ownership key granted by Lock. 
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -89,6 +112,13 @@ func (m *UnlockRequest) String() string { return proto.CompactTextStr func (*UnlockRequest) ProtoMessage() {} func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + type UnlockResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index 68a7c120e521..5333491a2e25 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,7 +16,10 @@ package v3rpc import ( "crypto/tls" + "io/ioutil" "math" + "os" + "sync" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -25,13 +28,16 @@ import ( "google.golang.org/grpc/grpclog" ) -const maxStreams = math.MaxUint32 +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 + maxSendBytes = math.MaxInt32 +) -func init() { - grpclog.SetLogger(plog) -} +// integration tests call this multiple times, which is racey in gRPC side +var grpclogOnce sync.Once -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { +func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { @@ -39,8 +45,10 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) + opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) - grpcServer := grpc.NewServer(opts...) + grpcServer := grpc.NewServer(append(opts, gopts...)...) 
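The v3rpc server construction above now accepts extra grpc.ServerOption values, derives its receive limit from the configured request cap plus a fixed framing overhead, caps the send size, and (further down) installs the grpclog V2 logger exactly once. The sketch below mirrors that wiring outside of etcd; the 1.5 MiB request cap and the "hasleader" metadata key are assumptions for illustration, and the interceptor only shows the metadata.FromIncomingContext call that replaces the removed metadata.FromContext:

package main

import (
    "context"
    "io/ioutil"
    "math"
    "os"

    "google.golang.org/grpc"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/metadata"
)

// requireLeader sketches the server-side interceptor pattern: incoming request
// metadata is read with metadata.FromIncomingContext. The "hasleader" key is
// hard-coded here purely for illustration.
func requireLeader(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    if md, ok := metadata.FromIncomingContext(ctx); ok {
        if ks := md["hasleader"]; len(ks) > 0 && ks[0] == "true" {
            // a real server would reject the RPC here if it currently has no leader
        }
    }
    return handler(ctx, req)
}

func main() {
    const (
        maxRequestBytes   = 1536 * 1024 // assumed request cap (~1.5 MiB), not a value from the patch
        grpcOverheadBytes = 512 * 1024  // same framing headroom the patch adds on top of the cap
    )

    opts := []grpc.ServerOption{
        grpc.UnaryInterceptor(requireLeader),
        grpc.MaxRecvMsgSize(maxRequestBytes + grpcOverheadBytes),
        grpc.MaxSendMsgSize(math.MaxInt32),
        grpc.MaxConcurrentStreams(math.MaxUint32),
    }
    srv := grpc.NewServer(opts...)
    defer srv.Stop()

    // Non-debug logging as in the patch: discard Info, keep Warning and Error on stderr.
    grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}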
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) @@ -49,5 +57,15 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) + grpclogOnce.Do(func() { + if s.Cfg.Debug { + grpc.EnableTracing = true + // enable info, warning, error + grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) + } else { + // only discard info + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + } + }) return grpcServer } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index 29aef2914a5b..de9470a8905c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ss.Context()) + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index a25d0ce6ad8c..fd27d10c02b1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -92,6 +92,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro return nil } if err != nil { + if isClientCtxErr(stream.Context().Err(), err) { + plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } else { + plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } return err } @@ -117,6 +122,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro resp.TTL = ttl err = stream.Send(resp) if err != nil { + if isClientCtxErr(stream.Context().Err(), err) { + plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } else { + plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } return err } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 0907e902c67b..bd17179e9977 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -17,6 +17,7 @@ package rpctypes import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( @@ -188,3 +189,10 @@ func Error(err error) error { } return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} } + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return 
s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 8d38d9bd18fe..93c5ad30af42 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -15,14 +15,18 @@ package v3rpc import ( + "strings" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" + "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func togRPCError(err error) error { @@ -101,3 +105,35 @@ func togRPCError(err error) error { return grpc.Errorf(codes.Unknown, err.Error()) } } + +func isClientCtxErr(ctxErr error, err error) bool { + if ctxErr != nil { + return true + } + + ev, ok := status.FromError(err) + if !ok { + return false + } + + switch ev.Code() { + case codes.Canceled, codes.DeadlineExceeded: + // client-side context cancel or deadline exceeded + // "rpc error: code = Canceled desc = context canceled" + // "rpc error: code = DeadlineExceeded desc = context deadline exceeded" + return true + case codes.Unavailable: + msg := ev.Message() + // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected") + // "rpc error: code = Unavailable desc = client disconnected" + if msg == "client disconnected" { + return true + } + // "grpc/transport.ClientTransport.CloseStream" on canceled streams + // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL") + if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") { + return true + } + } + return false +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index 84c0a5eac8cd..301ffbaf47f7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -141,6 +141,11 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // deadlock when calling sws.close(). 
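rpctypes gains an exported ErrorDesc helper that prefers the gRPC status message when one is attached and falls back to err.Error() otherwise; the same status inspection underlies the new client-context classification used for log levels. A hedged usage sketch (the sample error strings are invented):

package main

import (
    "errors"
    "fmt"

    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    // ErrorDesc strips the "rpc error: ..." wrapper from status-bearing errors.
    grpcErr := status.Errorf(codes.NotFound, "etcdserver: requested lease not found")
    fmt.Println(rpctypes.ErrorDesc(grpcErr)) // etcdserver: requested lease not found

    // Plain errors fall back to Error().
    fmt.Println(rpctypes.ErrorDesc(errors.New("dial tcp: connection refused")))

    // Codes like Canceled or DeadlineExceeded indicate client-side cancellation,
    // which the server now logs at debug rather than warning level.
    cancelErr := status.Errorf(codes.Canceled, "context canceled")
    if s, ok := status.FromError(cancelErr); ok {
        switch s.Code() {
        case codes.Canceled, codes.DeadlineExceeded:
            fmt.Println("client-side cancellation:", s.Message())
        }
    }
}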
go func() { if rerr := sws.recvLoop(); rerr != nil { + if isClientCtxErr(stream.Context().Err(), rerr) { + plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } else { + plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } errc <- rerr } }() @@ -337,6 +342,11 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { + plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) + } else { + plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) + } return } @@ -353,6 +363,11 @@ func (sws *serverWatchStream) sendLoop() { } if err := sws.gRPCStream.Send(c); err != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { + plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) + } else { + plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) + } return } @@ -368,6 +383,11 @@ func (sws *serverWatchStream) sendLoop() { for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { + plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + } else { + plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + } return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index 9c258934a1c0..ae8a4d08e355 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -55,12 +55,17 @@ type ServerConfig struct { AutoCompactionRetention int QuotaBackendBytes int64 + // MaxRequestBytes is the maximum request size to send over raft. + MaxRequestBytes uint + StrictReconfigCheck bool // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool AuthToken string + + Debug bool } // VerifyBootstrap sanity-checks the initial config for bootstrap case diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go index 601df671df80..02a23b78c107 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: etcdserver/etcdserverpb/rpc.proto -// DO NOT EDIT! /* Package etcdserverpb is a reverse proxy. 
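ServerConfig grows a MaxRequestBytes field (the request cap that also feeds the gRPC receive limit above) and a Debug flag that switches on grpc.EnableTracing and info-level grpclog output. A minimal sketch of the new fields only; the concrete values and the otherwise-empty config literal are illustrative assumptions, not how etcd builds its config:

package main

import (
    "fmt"

    "github.com/coreos/etcd/etcdserver"
)

func main() {
    cfg := etcdserver.ServerConfig{
        MaxRequestBytes: 1536 * 1024, // cap on requests sent over raft (assumed value)
        Debug:           true,        // enables gRPC tracing and verbose grpclog output
    }
    fmt.Println(cfg.MaxRequestBytes, cfg.Debug)
}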
@@ -21,10 +20,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -33,7 +34,7 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -46,7 +47,7 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client e var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -59,7 +60,7 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -72,7 +73,7 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client e var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -85,7 +86,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -150,7 +151,7 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -163,7 +164,7 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -228,7 +229,7 @@ func 
request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -241,7 +242,7 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -254,7 +255,7 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -267,7 +268,7 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -280,7 +281,7 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -293,7 +294,7 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -306,7 +307,7 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -319,7 +320,7 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -332,7 +333,7 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -345,7 +346,7 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.Snapshot(ctx, &protoReq) @@ -366,7 +367,7 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -379,7 +380,7 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -392,7 +393,7 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -405,7 +406,7 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -418,7 +419,7 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -431,7 +432,7 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c var metadata runtime.ServerMetadata if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -444,7 +445,7 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -457,7 +458,7 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -470,7 +471,7 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -483,7 +484,7 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -496,7 +497,7 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -509,7 +510,7 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -522,7 +523,7 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleList(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -535,7 +536,7 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -548,7 +549,7 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -561,7 +562,7 @@ func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime. var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -597,7 +598,15 @@ func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, e // RegisterKVHandler registers the http handlers for service KV to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewKVClient(conn) + return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn)) +} + +// RegisterKVHandler registers the http handlers for service KV to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "KVClient" to call the correct interceptors. +func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -612,18 +621,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -640,18 +650,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -668,18 +679,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -696,18 +708,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -724,18 +737,19 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -794,7 +808,15 @@ func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterWatchHandler registers the http handlers for service Watch to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewWatchClient(conn) + return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn)) +} + +// RegisterWatchHandler registers the http handlers for service Watch to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WatchClient" to call the correct interceptors. +func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -809,18 +831,19 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Watch_Watch_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -863,7 +886,15 @@ func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterLeaseHandler registers the http handlers for service Lease to "mux". // The handlers forward requests to the grpc endpoint over "conn". 
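Each generated Register<Service>Handler now delegates to a Register<Service>HandlerClient variant that accepts any implementation of the service client interface, which is what allows the gateway to be backed by an in-process client rather than a real connection. A hedged wiring sketch for the connection-backed path; the listen addresses, plain-HTTP serving, and the generated file's package name gw are assumptions for illustration:

package main

import (
    "log"
    "net/http"

    "github.com/coreos/etcd/etcdserver/etcdserverpb/gw"
    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "golang.org/x/net/context"
    "google.golang.org/grpc"
)

func main() {
    ctx := context.Background()

    // Dial the etcd gRPC endpoint; the generated helper builds a KVClient
    // from the connection and hands it to RegisterKVHandlerClient internally.
    conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    mux := runtime.NewServeMux()
    if err := gw.RegisterKVHandler(ctx, mux, conn); err != nil {
        log.Fatal(err)
    }

    // Any other etcdserverpb.KVClient implementation could be passed instead:
    //   gw.RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))

    log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}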
func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewLeaseClient(conn) + return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn)) +} + +// RegisterLeaseHandler registers the http handlers for service Lease to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LeaseClient" to call the correct interceptors. +func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -878,18 +909,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -906,18 +938,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -934,18 +967,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -962,18 +996,19 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1028,7 +1063,15 @@ func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeM // RegisterClusterHandler registers the http handlers for service Cluster to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewClusterClient(conn) + return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn)) +} + +// RegisterClusterHandler registers the http handlers for service Cluster to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ClusterClient" to call the correct interceptors. 
+func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1043,18 +1086,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1071,18 +1115,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1099,18 +1144,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1127,18 +1173,19 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1193,7 +1240,15 @@ func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewMaintenanceClient(conn) + return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn)) +} + +// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MaintenanceClient" to call the correct interceptors. +func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1208,18 +1263,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1236,18 +1292,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1264,18 +1321,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1292,18 +1350,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1320,18 +1379,19 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1390,7 +1450,15 @@ func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterAuthHandler registers the http handlers for service Auth to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := etcdserverpb.NewAuthClient(conn) + return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn)) +} + +// RegisterAuthHandler registers the http handlers for service Auth to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "AuthClient" to call the correct interceptors. +func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1405,18 +1473,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1433,18 +1502,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1461,18 +1531,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1489,18 +1560,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1517,18 +1589,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1545,18 +1618,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1573,18 +1647,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1601,18 +1676,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1629,18 +1705,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1657,18 +1734,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1685,18 +1763,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1713,18 +1792,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1741,18 +1821,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1769,18 +1850,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. 
}(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1797,18 +1879,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1825,18 +1908,19 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 018a3652be7a..894c815f824c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -15,6 +15,8 @@ import ( authpb "github.com/coreos/etcd/auth/authpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -223,6 +225,34 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -273,6 +303,97 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. 
@@ -303,6 +424,20 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -327,6 +462,48 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. @@ -371,6 +548,27 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. 
@@ -391,6 +589,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -761,6 +966,27 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -957,6 +1183,13 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -980,6 +1213,20 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1022,6 +1269,13 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + type SnapshotRequest struct { } @@ -1052,6 +1306,20 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1205,6 +1473,48 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. 
WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1215,6 +1525,13 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. @@ -1253,6 +1570,41 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1272,6 +1624,20 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1293,6 +1659,27 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1303,6 +1690,13 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1329,6 +1723,13 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. 
@@ -1349,6 +1750,20 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1361,6 +1776,20 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1385,6 +1814,34 @@ func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + type Member struct { // ID is the member ID for this member. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1401,6 +1858,34 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1411,6 +1896,13 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. 
@@ -1455,6 +1947,13 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after removing the member. @@ -1492,6 +1991,20 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after updating the member. @@ -1591,6 +2104,27 @@ func (m *AlarmRequest) String() string { return proto.CompactTextStri func (*AlarmRequest) ProtoMessage() {} func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` @@ -1603,6 +2137,20 @@ func (m *AlarmMember) String() string { return proto.CompactTextStrin func (*AlarmMember) ProtoMessage() {} func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
@@ -1662,6 +2210,41 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type AuthEnableRequest struct { } @@ -1688,6 +2271,20 @@ func (m *AuthenticateRequest) String() string { return proto.CompactT func (*AuthenticateRequest) ProtoMessage() {} func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` @@ -1698,6 +2295,20 @@ func (m *AuthUserAddRequest) String() string { return proto.CompactTe func (*AuthUserAddRequest) ProtoMessage() {} func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1707,6 +2318,13 @@ func (m *AuthUserGetRequest) String() string { return proto.CompactTe func (*AuthUserGetRequest) ProtoMessage() {} func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserDeleteRequest struct { // name is the name of the user to delete. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1717,6 +2335,13 @@ func (m *AuthUserDeleteRequest) String() string { return proto.Compac func (*AuthUserDeleteRequest) ProtoMessage() {} func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1731,6 +2356,20 @@ func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. 
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -1743,6 +2382,20 @@ func (m *AuthUserGrantRoleRequest) String() string { return proto.Com func (*AuthUserGrantRoleRequest) ProtoMessage() {} func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` @@ -1753,6 +2406,20 @@ func (m *AuthUserRevokeRoleRequest) String() string { return proto.Co func (*AuthUserRevokeRoleRequest) ProtoMessage() {} func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1763,6 +2430,13 @@ func (m *AuthRoleAddRequest) String() string { return proto.CompactTe func (*AuthRoleAddRequest) ProtoMessage() {} func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } @@ -1772,6 +2446,13 @@ func (m *AuthRoleGetRequest) String() string { return proto.CompactTe func (*AuthRoleGetRequest) ProtoMessage() {} func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserListRequest struct { } @@ -1797,6 +2478,13 @@ func (m *AuthRoleDeleteRequest) String() string { return proto.Compac func (*AuthRoleDeleteRequest) ProtoMessage() {} func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1811,6 +2499,13 @@ func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { if m != nil { return m.Perm @@ -1831,6 +2526,27 @@ func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + type AuthEnableResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1881,6 +2597,13 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1914,6 +2637,13 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2037,6 +2767,13 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2054,6 +2791,13 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -10511,7 +11255,24 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 2 { + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -10552,23 +11313,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field 
Filters", wireType) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index 38a96f719a09..271c5e773137 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -82,7 +82,8 @@ const ( releaseDelayAfterSnapshot = 30 * time.Second // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 + maxPendingRevokes = 16 + recommendedMaxRequestBytes = 10 * 1024 * 1024 ) var ( @@ -259,6 +260,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index 870f98e069f9..ae449bbf22ff 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -33,12 +33,6 @@ import ( ) const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow. @@ -556,7 +550,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > maxRequestBytes { + if len(data) > int(s.Cfg.MaxRequestBytes) { return nil, ErrRequestTooLarge } diff --git a/vendor/github.com/coreos/etcd/glide.lock b/vendor/github.com/coreos/etcd/glide.lock index 1d866cf7c56f..67d83d8fdf67 100644 --- a/vendor/github.com/coreos/etcd/glide.lock +++ b/vendor/github.com/coreos/etcd/glide.lock @@ -1,5 +1,5 @@ -hash: cee1f2629857e9c2384ad89ff6014db09498c9af53771e5144ad3a4b510ff00e -updated: 2017-05-30T10:29:08.22609283-07:00 +hash: 57308341a6ff76ce7960119ca6f589d2f5476c056f1f38f9a32552d9e68509d8 +updated: 2017-12-19T13:02:46.509863-08:00 imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 @@ -7,10 +7,10 @@ imports: - quantile - name: github.com/bgentry/speakeasy version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd -- name: github.com/boltdb/bolt - version: 583e8937c61f1af6513608ccc75c97b6abdf4ff9 - name: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 +- name: github.com/coreos/bbolt + version: 32c383e75ce054674c53b5a07e55de85332aee14 - name: github.com/coreos/go-semver version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6 subpackages: @@ -27,7 +27,7 @@ imports: - capnslog - dlopen - name: github.com/cpuguy83/go-md2man - version: bcc0a711c5e6bbe72c7cb13d81c7109b45267fd2 + version: 23709d0847197db6021a51fdb193e66e9222d4e7 subpackages: - md2man - name: github.com/dgrijalva/jwt-go @@ -37,7 +37,7 @@ imports: - name: github.com/ghodss/yaml version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 - name: github.com/gogo/protobuf - version: 909568be09de550ed094403c2bf8a261b5bb730a + version: 100ba4e885062801d56799d78530b73b178a78f3 
subpackages: - proto - name: github.com/golang/groupcache @@ -45,16 +45,22 @@ imports: subpackages: - lru - name: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 + version: 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 subpackages: - jsonpb - proto + - protoc-gen-go/descriptor + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/struct + - ptypes/timestamp - name: github.com/google/btree version: 925471ac9e2131377a91e1595defec898166fe49 - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 - name: github.com/grpc-ecosystem/grpc-gateway - version: 18d159699f2e83fc5bb9ef2f79465ca3f3122676 + version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104 subpackages: - runtime - runtime/internal @@ -94,7 +100,7 @@ imports: subpackages: - xfs - name: github.com/russross/blackfriday - version: 0ba0f2b6ed7c475a92e4df8641825cb7a11d1fa3 + version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c - name: github.com/spf13/cobra version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b - name: github.com/spf13/pflag @@ -108,12 +114,12 @@ imports: - name: github.com/xiang90/probing version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2 - name: golang.org/x/crypto - version: 1351f936d976c60a0a48d728281922cf63eafb8d + version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 subpackages: - bcrypt - blowfish - name: golang.org/x/net - version: c8c74377599bd978aee1cf3b9b63a8634051cec2 + version: 66aacef3dd8a676686c7ae3716979581e8b03c47 subpackages: - context - http2 @@ -137,18 +143,30 @@ imports: version: c06e80d9300e4443158a03817b8a8cb37d230320 subpackages: - rate +- name: google.golang.org/genproto + version: 09f6ed296fc66555a25fe4ce95173148778dfa85 + subpackages: + - googleapis/api/annotations + - googleapis/rpc/status - name: google.golang.org/grpc - version: 8050b9cbc271307e5a716a9d782803d09b0d6f2d + version: 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e subpackages: + - balancer - codes + - connectivity - credentials + - grpclb/grpc_lb_v1/messages - grpclog + - health + - health/grpc_health_v1 - internal - keepalive - metadata - naming - peer + - resolver - stats + - status - tap - transport - name: gopkg.in/cheggaaa/pb.v1 diff --git a/vendor/github.com/coreos/etcd/glide.yaml b/vendor/github.com/coreos/etcd/glide.yaml index 90bcfeddc1ca..b3bd3fef82a5 100644 --- a/vendor/github.com/coreos/etcd/glide.yaml +++ b/vendor/github.com/coreos/etcd/glide.yaml @@ -2,8 +2,8 @@ package: github.com/coreos/etcd import: - package: github.com/bgentry/speakeasy version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd -- package: github.com/boltdb/bolt - version: v1.3.0 +- package: github.com/coreos/bbolt + version: v1.3.1-coreos.5 - package: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 - package: github.com/coreos/go-semver @@ -25,7 +25,7 @@ import: - package: github.com/ghodss/yaml version: v1.0.0 - package: github.com/gogo/protobuf - version: v0.3 + version: v0.4 subpackages: - proto - package: github.com/golang/groupcache @@ -33,14 +33,14 @@ import: subpackages: - lru - package: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 + version: 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 subpackages: - jsonpb - proto - package: github.com/google/btree version: 925471ac9e2131377a91e1595defec898166fe49 - package: github.com/grpc-ecosystem/grpc-gateway - version: v1.2.0 + version: v1.3.0 subpackages: - runtime - runtime/internal @@ -78,12 +78,12 @@ import: - package: github.com/grpc-ecosystem/go-grpc-prometheus version: v1.1 - package: 
golang.org/x/crypto - version: 1351f936d976c60a0a48d728281922cf63eafb8d + version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 subpackages: - bcrypt - blowfish - package: golang.org/x/net - version: c8c74377599bd978aee1cf3b9b63a8634051cec2 + version: 66aacef3dd8a676686c7ae3716979581e8b03c47 subpackages: - context - http2 @@ -97,7 +97,7 @@ subpackages: - rate - package: google.golang.org/grpc - version: v1.2.1 + version: v1.7.5 subpackages: - codes - credentials @@ -107,9 +107,28 @@ - naming - peer - transport + - health + - health/grpc_health_v1 - package: gopkg.in/cheggaaa/pb.v1 version: v1.0.2 - package: gopkg.in/yaml.v2 version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b - package: github.com/dgrijalva/jwt-go version: v3.0.0 +- package: github.com/beorn7/perks + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +- package: github.com/cpuguy83/go-md2man + version: 23709d0847197db6021a51fdb193e66e9222d4e7 +- package: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c +- package: github.com/prometheus/client_model + version: 6f3806018612930941127f2a7c6c453ba2c527d2 +- package: github.com/russross/blackfriday + version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c +- package: golang.org/x/text + version: 4ee4af566555f5fbe026368b75596286a312663a +- package: google.golang.org/genproto + version: 09f6ed296fc66555a25fe4ce95173148778dfa85 + subpackages: + - googleapis/api/annotations + - googleapis/rpc/status diff --git a/vendor/github.com/coreos/etcd/hack/patch/README.md b/vendor/github.com/coreos/etcd/hack/patch/README.md new file mode 100644 index 000000000000..4f8282e6a268 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/patch/README.md @@ -0,0 +1,37 @@ +# ./hack/patch/cherrypick.sh + +Handles cherry-picks of PR(s) from etcd master to a stable etcd release branch automatically. + +## Setup + +Set the `UPSTREAM_REMOTE` and `FORK_REMOTE` environment variables. +`UPSTREAM_REMOTE` should be set to the git remote name of `github.com/coreos/etcd`, +and `FORK_REMOTE` should be set to the git remote name of the forked etcd +repo (`github.com/${github-username}/etcd`). Use `git remote -v` to +look up the git remote names. If etcd has not been forked, create +one on github.com and register it locally with `git remote add ...`. + + +``` +export UPSTREAM_REMOTE=origin +export FORK_REMOTE=${github-username} +export GITHUB_USER=${github-username} +``` + +Next, install hub from https://github.com/github/hub + +## Usage + +To cherry pick PR 12345 onto release-3.2 and propose it as a PR, run: + +```sh +./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345 +``` + +To cherry pick 12345 then 56789 and propose them together as a single PR, run: + +``` +./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345 56789 +``` + + diff --git a/vendor/github.com/coreos/etcd/hack/patch/cherrypick.sh b/vendor/github.com/coreos/etcd/hack/patch/cherrypick.sh new file mode 100755 index 000000000000..4faf3efa2730 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/patch/cherrypick.sh @@ -0,0 +1,229 @@ +#!/usr/bin/env bash + +# Based on github.com/kubernetes/kubernetes/blob/v1.8.2/hack/cherry_pick_pull.sh + +# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How +# meta.) Assumes you care about pulls from remote "upstream" and +# checks them out to a branch named: +# automated-cherry-pick-of-<pr>-<target branch>-<timestamp> + +set -o errexit +set -o nounset +set -o pipefail + +declare -r ETCD_ROOT="$(dirname "${BASH_SOURCE}")/../.."
+cd "${ETCD_ROOT}" + +declare -r STARTINGBRANCH=$(git symbolic-ref --short HEAD) +declare -r REBASEMAGIC="${ETCD_ROOT}/.git/rebase-apply" +DRY_RUN=${DRY_RUN:-""} +REGENERATE_DOCS=${REGENERATE_DOCS:-""} +UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream} +FORK_REMOTE=${FORK_REMOTE:-origin} + +if [[ -z ${GITHUB_USER:-} ]]; then + echo "Please export GITHUB_USER= (or GH organization, if that's where your fork lives)" + exit 1 +fi + +if ! which hub > /dev/null; then + echo "Can't find 'hub' tool in PATH, please install from https://github.com/github/hub" + exit 1 +fi + +if [[ "$#" -lt 2 ]]; then + echo "${0} ...: cherry pick one or more onto and leave instructions for proposing pull request" + echo + echo " Checks out and handles the cherry-pick of (possibly multiple) for you." + echo " Examples:" + echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR." + echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR." + echo + echo " Set the DRY_RUN environment var to skip git push and creating PR." + echo " This is useful for creating patches to a release branch without making a PR." + echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked." + echo + echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits." + echo " This is useful when picking commits containing changes to API documentation." + echo + echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)" + echo " To override the default remote names to what you have locally." + exit 2 +fi + +if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then + echo "!!! Dirty tree. Clean up and try again." + exit 1 +fi + +if [[ -e "${REBASEMAGIC}" ]]; then + echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again." + exit 1 +fi + +declare -r BRANCH="$1" +shift 1 +declare -r PULLS=( "$@" ) + +function join { local IFS="$1"; shift; echo "$*"; } +declare -r PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789" +declare -r PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789" + +echo "+++ Updating remotes..." +git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}" + +if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then + echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21." + echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)" + exit 1 +fi + +declare -r NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools. +declare -r NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')" +declare -r NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)" +echo "+++ Creating local branch ${NEWBRANCHUNIQ}" + +cleanbranch="" +prtext="" +gitamcleanup=false +function return_to_kansas { + if [[ "${gitamcleanup}" == "true" ]]; then + echo + echo "+++ Aborting in-progress git am." + git am --abort >/dev/null 2>&1 || true + fi + + # return to the starting branch and delete the PR text file + if [[ -z "${DRY_RUN}" ]]; then + echo + echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up." 
+ git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true + if [[ -n "${cleanbranch}" ]]; then + git branch -D "${cleanbranch}" >/dev/null 2>&1 || true + fi + if [[ -n "${prtext}" ]]; then + rm "${prtext}" + fi + fi +} +trap return_to_kansas EXIT + +SUBJECTS=() +function make-a-pr() { + local rel="$(basename "${BRANCH}")" + echo + echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}" + + # This looks like an unnecessary use of a tmpfile, but it avoids + # https://github.com/github/hub/issues/976 Otherwise stdin is stolen + # when we shove the heredoc at hub directly, tickling the ioctl + # crash. + prtext="$(mktemp -t prtext.XXXX)" # cleaned in return_to_kansas + cat >"${prtext}" <&2 + exit 1 + fi + done + + if [[ "${conflicts}" != "true" ]]; then + echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'" + exit 1 + fi + } + + # set the subject + subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //') + SUBJECTS+=("#${pull}: ${subject}") + + # remove the patch file from /tmp + rm -f "/tmp/${pull}.patch" +done +gitamcleanup=false + +# Re-generate docs (if needed) +if [[ -n "${REGENERATE_DOCS}" ]]; then + echo + echo "Regenerating docs..." + if ! hack/generate-docs.sh; then + echo + echo "hack/generate-docs.sh FAILED to complete." + exit 1 + fi +fi + +if [[ -n "${DRY_RUN}" ]]; then + echo "!!! Skipping git push and PR creation because you set DRY_RUN." + echo "To return to the branch you were in when you invoked this script:" + echo + echo " git checkout ${STARTINGBRANCH}" + echo + echo "To delete this branch:" + echo + echo " git branch -D ${NEWBRANCHUNIQ}" + exit 0 +fi + +if git remote -v | grep ^${FORK_REMOTE} | grep etcd/etcd.git; then + echo "!!! You have ${FORK_REMOTE} configured as your etcd/etcd.git" + echo "This isn't normal. Leaving you with push instructions:" + echo + echo "+++ First manually push the branch this script created:" + echo + echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}" + echo + echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)." + echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values." + echo + make-a-pr + cleanbranch="" + exit 0 +fi + +echo +echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):" +echo +echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}" +echo +read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r +if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then + echo "Aborting." 
>&2 + exit 1 +fi + +git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}" +make-a-pr diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/Makefile b/vendor/github.com/coreos/etcd/hack/scripts-dev/Makefile new file mode 100644 index 000000000000..1942da97dd2c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/Makefile @@ -0,0 +1,294 @@ +# run from repository root +# +# Example: +# make clean -f ./hack/scripts-dev/Makefile +# make build -f ./hack/scripts-dev/Makefile + +.PHONY: build +build: + GO_BUILD_FLAGS="-v" ./build + ./bin/etcd --version + ETCDCTL_API=3 ./bin/etcdctl version + +clean: + rm -f ./codecov + rm -rf ./agent-* + rm -rf ./covdir + rm -f ./*.log + rm -f ./bin/Dockerfile-release + rm -rf ./bin/*.etcd + rm -rf ./gopath + rm -rf ./release + rm -f ./integration/127.0.0.1:* ./integration/localhost:* + rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:* + rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:* + +_GO_VERSION = 1.9.2 +ifdef GO_VERSION + _GO_VERSION = $(GO_VERSION) +endif + +# Example: +# GO_VERSION=1.8.5 make build-docker-test -f ./hack/scripts-dev/Makefile +# make build-docker-test -f ./hack/scripts-dev/Makefile +# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io +# GO_VERSION=1.8.5 make push-docker-test -f ./hack/scripts-dev/Makefile +# make push-docker-test -f ./hack/scripts-dev/Makefile +# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com +# GO_VERSION=1.8.5 make pull-docker-test -f ./hack/scripts-dev/Makefile +# make pull-docker-test -f ./hack/scripts-dev/Makefile + +build-docker-test: + $(info GO_VERSION: $(_GO_VERSION)) + @cat ./Dockerfile-test | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \ + > ./.Dockerfile-test + docker build \ + --tag gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \ + --file ./.Dockerfile-test . 
+ +push-docker-test: + $(info GO_VERSION: $(_GO_VERSION)) + gcloud docker -- push gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) + +pull-docker-test: + $(info GO_VERSION: $(_GO_VERSION)) + docker pull gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) + +compile-with-docker-test: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --volume=`pwd`/:/etcd \ + gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version" + +# Local machine: +# TEST_OPTS="PASSES='fmt'" make test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="PASSES='fmt bom dep compile build unit'" make test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional'" make test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="PASSES='build grpcproxy'" make test -f ./hack/scripts-dev/Makefile +# +# Example (test with docker): +# make pull-docker-test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="PASSES='fmt'" make docker-test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test -f ./hack/scripts-dev/Makefile +# +# Travis CI (test with docker): +# TEST_OPTS="PASSES='fmt bom dep compile build unit'" make docker-test -f ./hack/scripts-dev/Makefile +# +# Semaphore CI (test with docker): +# TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional'" make docker-test -f ./hack/scripts-dev/Makefile +# TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test -f ./hack/scripts-dev/Makefile +# +# grpc-proxy tests (test with docker): +# TEST_OPTS="PASSES='build grpcproxy'" make docker-test -f ./hack/scripts-dev/Makefile + +TEST_SUFFIX = $(shell date +%s | base64 | head -c 15) + +_TEST_OPTS = "PASSES='unit'" +ifdef TEST_OPTS + _TEST_OPTS = $(TEST_OPTS) +endif + +.PHONY: test +test: + $(info TEST_OPTS: $(_TEST_OPTS)) + $(info log-file: test-$(TEST_SUFFIX).log) + $(_TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log + ! egrep "(--- FAIL:|panic: test timed out|appears to have leaked|Too many goroutines)" -B50 -A10 test-$(TEST_SUFFIX).log + +docker-test: + $(info GO_VERSION: $(_GO_VERSION)) + $(info TEST_OPTS: $(_TEST_OPTS)) + $(info log-file: test-$(TEST_SUFFIX).log) + docker run \ + --rm \ + --volume=/tmp:/tmp \ + --volume=`pwd`:/go/src/github.com/coreos/etcd \ + gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \ + /bin/bash -c "$(_TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log" + ! egrep "(--- FAIL:|panic: test timed out|appears to have leaked|Too many goroutines)" -B50 -A10 test-$(TEST_SUFFIX).log + +docker-test-coverage: + $(info GO_VERSION: $(_GO_VERSION)) + $(info log-file: docker-test-coverage-$(TEST_SUFFIX).log) + docker run \ + --rm \ + --volume=/tmp:/tmp \ + --volume=`pwd`:/go/src/github.com/coreos/etcd \ + gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \ + /bin/bash -c "COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee docker-test-coverage-$(TEST_SUFFIX).log && /codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1" + ! 
egrep "(--- FAIL:|panic: test timed out|appears to have leaked|Too many goroutines)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log + +# build release container image with Linux +_ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound") +ifdef ETCD_VERSION + _ETCD_VERSION = $(ETCD_VERSION) +endif + +# Example: +# ETCD_VERSION=v3.3.0-test.0 make build-docker-release-master -f ./hack/scripts-dev/Makefile +# ETCD_VERSION=v3.3.0-test.0 make push-docker-release-master -f ./hack/scripts-dev/Makefile +# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com + +build-docker-release-master: compile-with-docker-test + $(info ETCD_VERSION: $(_ETCD_VERSION)) + cp ./Dockerfile-release ./bin/Dockerfile-release + docker build \ + --tag gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \ + --file ./bin/Dockerfile-release \ + ./bin + rm -f ./bin/Dockerfile-release + + docker run \ + --rm \ + gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \ + /bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version" + +push-docker-release-master: + $(info ETCD_VERSION: $(_ETCD_VERSION)) + gcloud docker -- push gcr.io/etcd-development/etcd:$(_ETCD_VERSION) + +# Example: +# make build-docker-test -f ./hack/scripts-dev/Makefile +# make compile-with-docker-test -f ./hack/scripts-dev/Makefile +# make build-docker-dns-test -f ./hack/scripts-dev/Makefile +# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io +# make push-docker-dns-test -f ./hack/scripts-dev/Makefile +# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com +# make pull-docker-dns-test -f ./hack/scripts-dev/Makefile +# make docker-dns-test-certs-run -f ./hack/scripts-dev/Makefile +# make docker-dns-test-certs-gateway-run -f ./hack/scripts-dev/Makefile +# make docker-dns-test-certs-wildcard-run -f ./hack/scripts-dev/Makefile + +build-docker-dns-test: + $(info GO_VERSION: $(_GO_VERSION)) + @cat ./hack/scripts-dev/docker-dns/Dockerfile | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \ + > ./hack/scripts-dev/docker-dns/.Dockerfile + + docker build \ + --tag gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \ + --file ./hack/scripts-dev/docker-dns/.Dockerfile \ + ./hack/scripts-dev/docker-dns + + docker run \ + --rm \ + --dns 127.0.0.1 \ + gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \ + /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local" + +push-docker-dns-test: + $(info GO_VERSION: $(_GO_VERSION)) + gcloud docker -- push gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) + +pull-docker-dns-test: + $(info GO_VERSION: $(_GO_VERSION)) + docker pull gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) + +docker-dns-test-certs-run: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns/certs:/certs \ + gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" + +docker-dns-test-certs-gateway-run: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns/certs-gateway:/certs-gateway \ + gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" + +docker-dns-test-certs-wildcard-run: + $(info 
GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns/certs-wildcard:/certs-wildcard \ + gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" + +# Example: +# make build-docker-test -f ./hack/scripts-dev/Makefile +# make compile-with-docker-test -f ./hack/scripts-dev/Makefile +# make build-docker-dns-srv-test -f ./hack/scripts-dev/Makefile +# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io +# make push-docker-dns-srv-test -f ./hack/scripts-dev/Makefile +# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com +# make pull-docker-dns-srv-test -f ./hack/scripts-dev/Makefile +# make docker-dns-srv-test-certs-run -f ./hack/scripts-dev/Makefile +# make docker-dns-srv-test-certs-gateway-run -f ./hack/scripts-dev/Makefile +# make docker-dns-srv-test-certs-wildcard-run -f ./hack/scripts-dev/Makefile + +build-docker-dns-srv-test: + $(info GO_VERSION: $(_GO_VERSION)) + @cat ./hack/scripts-dev/docker-dns-srv/Dockerfile | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \ + > ./hack/scripts-dev/docker-dns-srv/.Dockerfile + + docker build \ + --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \ + --file ./hack/scripts-dev/docker-dns-srv/.Dockerfile \ + ./hack/scripts-dev/docker-dns-srv + + docker run \ + --rm \ + --dns 127.0.0.1 \ + gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \ + /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local" + +push-docker-dns-srv-test: + $(info GO_VERSION: $(_GO_VERSION)) + gcloud docker -- push gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) + +pull-docker-dns-srv-test: + $(info GO_VERSION: $(_GO_VERSION)) + docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) + +docker-dns-srv-test-certs-run: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns-srv/certs:/certs \ + gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" + +docker-dns-srv-test-certs-gateway-run: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns-srv/certs-gateway:/certs-gateway \ + gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" + +docker-dns-srv-test-certs-wildcard-run: + $(info GO_VERSION: $(_GO_VERSION)) + docker run \ + --rm \ + --tty \ + --dns 127.0.0.1 \ + --volume=/tmp:/tmp \ + --volume=`pwd`/bin:/etcd \ + --volume=`pwd`/hack/scripts-dev/docker-dns-srv/certs-wildcard:/certs-wildcard \ + gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \ + /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/README b/vendor/github.com/coreos/etcd/hack/scripts-dev/README new file mode 100644 index 000000000000..16c3e583d30a --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/hack/scripts-dev/README @@ -0,0 +1,2 @@ + +scripts for etcd development diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/Dockerfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/Dockerfile new file mode 100644 index 000000000000..07e907214420 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/Dockerfile @@ -0,0 +1,44 @@ +FROM ubuntu:16.10 + +RUN rm /bin/sh && ln -s /bin/bash /bin/sh +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +RUN apt-get -y update \ + && apt-get -y install \ + build-essential \ + gcc \ + apt-utils \ + pkg-config \ + software-properties-common \ + apt-transport-https \ + libssl-dev \ + sudo \ + bash \ + curl \ + tar \ + git \ + netcat \ + bind9 \ + dnsutils \ + && apt-get -y update \ + && apt-get -y upgrade \ + && apt-get -y autoremove \ + && apt-get -y autoclean + +ENV GOROOT /usr/local/go +ENV GOPATH /go +ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} +ENV GO_VERSION REPLACE_ME_GO_VERSION +ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang +RUN rm -rf ${GOROOT} \ + && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ + && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ + && go version \ + && go get -v -u github.com/mattn/goreman + +RUN mkdir -p /var/bind /etc/bind +RUN chown root:bind /var/bind /etc/bind + +ADD named.conf etcd.zone rdns.zone /etc/bind/ +RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone +ADD resolv.conf /etc/resolv.conf diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/Procfile new file mode 100644 index 000000000000..7c5d07e288bd --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/Procfile @@ -0,0 +1,7 @@ +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state 
new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +gateway: ./etcd gateway start --discovery-srv etcd.local --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790 diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca.crt new file mode 100644 index 000000000000..19b26c455514 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUbQA3lX1hcR1W8D5wmmAwaLp4AWQwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5 +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDdZjG+dJixdUuZLIlPVE/qvqNqbgIQy3Hrgq9OlPevLu3FAKIgTHoSKugq +jOuBjzAtmbGTky3PPmkjWrOUWKEUYMuJJzXA1fO2NALXle47NVyVVfuwCmDnaAAL +Sw4QTZKREoe3EwswbeYguQinCqazRwbXMzzfypIfaHAyGrqFCq12IvarrjfDcamm +egtPkxNNdj1QHbkeYXcp76LOSBRjD2B3bzZvyVv/wPORaGTFXQ0feGz/93/Y/E0z +BL5TdZ84qmgKxW04hxkhhuuxsL5zDNpbXcGm//Zw9qzO/AvtEux6ag9t0JziiEtj +zLz5M7yXivfG4oxEeLKTieS/1ZkbAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBR7XtZP3fc6ElgHl6hdSHLmrFWj +MzANBgkqhkiG9w0BAQsFAAOCAQEAPy3ol3CPyFxuWD0IGKde26p1mT8cdoaeRbOa +2Z3GMuRrY2ojaKMfXuroOi+5ZbR9RSvVXhVX5tEMOSy81tb5OGPZP24Eroh4CUfK +bw7dOeBNCm9tcmHkV+5frJwOgjN2ja8W8jBlV1flLx+Jpyk2PSGun5tQPsDlqzor +E8QQ2FzCzxoGiEpB53t5gKeX+mH6gS1c5igJ5WfsEGXBC4xJm/u8/sg30uCGP6kT +tCoQ8gnvGen2OqYJEfCIEk28/AZJvJ90TJFS3ExXJpyfImK9j5VcTohW+KvcX5xF +W7M6KCGVBQtophobt3v/Zs4f11lWck9xVFCPGn9+LI1dbJUIIQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/run.sh new file mode 100755 index 000000000000..ef4c1667cf9f --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/run.sh @@ -0,0 +1,47 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs-gateway/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --discovery-srv etcd.local \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --discovery-srv etcd.local \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --discovery-srv etcd.local \ + get abc + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=127.0.0.1:23790 \ + put ghi jkl + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=127.0.0.1:23790 \ + get ghi diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server-ca-csr.json new file mode 100644 index 000000000000..72bd3808288a --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server-ca-csr.json @@ -0,0 +1,23 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "m1.etcd.local", + "m2.etcd.local", + "m3.etcd.local", + "etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.crt new file mode 100644 index 000000000000..ef591cc7cc90 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIENTCCAx2gAwIBAgIUcviGEkA57QgUUFUIuB23kO/jHWIwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH 
+Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5 +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL6rB1Kh08Fo +FieWqzB4WvKxSFjLWlNfAXbSC1IEPEc/2JOSTF/VfsEX7Xf4eDlTUIZ/TpMS4nUE +Jn0rOIxDJWieQgF99a88CKCwVeqyiQ1iGlI/Ls78P7712QJ1QvcYPBRCvAFo2VLg +TSNhq4taRtAnP690TJVKMSxHg7qtMIpiBLc8ryNbtNUkQHl7/puiBZVVFwHQZm6d +ZRkfMqXWs4+VKLTx0pqJaM0oWVISQlLWQV83buVsuDVyLAZu2MjRYZwBj9gQwZDO +15VGvacjMU+l1+nLRuODrpGeGlxwfT57jqipbUtTsoZFsGxPdIWn14M6Pzw/mML4 +guYLKv3UqkkCAwEAAaOB1TCB0jAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFKYKYVPu +XPnZ2j0NORiNPUJpBnhkMB8GA1UdIwQYMBaAFHte1k/d9zoSWAeXqF1IcuasVaMz +MFMGA1UdEQRMMEqCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 +Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B +AQsFAAOCAQEAK40lD6Nx/V6CaShL95fQal7mFp/LXiyrlFTqCqrCruVnntwpukSx +I864bNMxVSTStEA3NM5V4mGuYjRvdjS65LBhaS1MQDPb4ofPj0vnxDOx6fryRIsB +wYKDuT4LSQ7pV/hBfL/bPb+itvb24G4/ECbduOprrywxmZskeEm/m0WqUb1A08Hv +6vDleyt382Wnxahq8txhMU+gNLTGVne60hhfLR+ePK7MJ4oyk3yeUxsmsnBkYaOu +gYOak5nWzRa09dLq6/vHQLt6n0AB0VurMAjshzO2rsbdOkD233sdkvKiYpayAyEf +Iu7S5vNjP9jiUgmws6G95wgJOd2xv54D4Q== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.key.insecure new file mode 100644 index 000000000000..623457b5dab2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-gateway/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAvqsHUqHTwWgWJ5arMHha8rFIWMtaU18BdtILUgQ8Rz/Yk5JM +X9V+wRftd/h4OVNQhn9OkxLidQQmfSs4jEMlaJ5CAX31rzwIoLBV6rKJDWIaUj8u +zvw/vvXZAnVC9xg8FEK8AWjZUuBNI2Gri1pG0Cc/r3RMlUoxLEeDuq0wimIEtzyv +I1u01SRAeXv+m6IFlVUXAdBmbp1lGR8ypdazj5UotPHSmolozShZUhJCUtZBXzdu +5Wy4NXIsBm7YyNFhnAGP2BDBkM7XlUa9pyMxT6XX6ctG44OukZ4aXHB9PnuOqKlt +S1OyhkWwbE90hafXgzo/PD+YwviC5gsq/dSqSQIDAQABAoIBAEAOsb0fRUdbMuZG +BmmYZeXXjdjXKReNea5zzv3VEnNVjeu2YRZpYdZ5tXxy6+FGjm1BZCKhW5e4tz2i +QbNN88l8MezSZrJi1vs1gwgAx27JoNI1DALaWIhNjIT45HCjobuk2AkZMrpXRVM3 +wyxkPho8tXa6+efGL1MTC7yx5vb2dbhnEsjrPdUO0GLVP56bgrz7vRk+hE772uq2 +QDenZg+PcH+hOhptbY1h9CYotGWYXCpi0+yoHhsh5PTcEpyPmLWSkACsHovm3MIn +a5oU0uh28nVBfYE0Sk6I9XBERHVO/OrCvz4Y3ZbVyGpCdLcaMB5wI1P4a5ULV52+ +VPrALQkCgYEA+w85KYuL+eUjHeMqa8V8A9xgcl1+dvB8SXgfRRm5QTqxgetzurD9 +G7vgMex42nqgoW1XUx6i9roRk3Qn3D2NKvBJcpMohYcY3HcGkCsBwtNUCyOWKasS +Oj2q9LzPjVqTFII0zzarQ85XuuZyTRieFAMoYmsS8O/GcapKqYhPIDMCgYEAwmuR +ctnCNgoEj1NaLBSAcq7njONvYUFvbXO8BCyd1WeLZyz/krgXxuhQh9oXIccWAKX2 +uxIDaoWV8F5c8bNOkeebHzVHfaLpwl4IlLa/i5WTIc+IZmpBR0aiS021k/M3KkDg +KnQXAer6jEymT3lUL0AqZd+GX6DjFw61zPOFH5MCgYAnCiv6YN/IYTA/woZjMddi +Bk/dGNrEhgrdpdc++IwNL6JQsJtTaZhCSsnHGZ2FY9I8p/MPUtFGipKXGlXkcpHU +Hn9dWLLRaLud9MhJfNaORCxqewMrwZVZByPhYMbplS8P3lt16WtiZODRiGo3wN87 +/221OC8+1hpGrJNln3OmbwKBgDV8voEoY4PWcba0qcQix8vFTrK2B3hsNimYg4tq +cum5GOMDwDQvLWttkmotl9uVF/qJrj19ES+HHN8KNuvP9rexTj3hvI9V+JWepSG0 +vTG7rsTIgbAbX2Yqio/JC0Fu0ihvvLwxP/spGFDs7XxD1uNA9ekc+6znaFJ5m46N +GHy9AoGBAJmGEv5+rM3cucRyYYhE7vumXeCLXyAxxaf0f7+1mqRVO6uNGNGbNY6U +Heq6De4yc1VeAXUpkGQi/afPJNMU+fy8paCjFyzID1yLvdtFOG38KDbgMmj4t+cH +xTp2RT3MkcCWPq2+kXZeQjPdesPkzdB+nA8ckaSursV908n6AHcM +-----END RSA PRIVATE KEY----- diff --git 
a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/Procfile new file mode 100644 index 000000000000..02613553c307 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/Procfile @@ -0,0 +1,5 @@ +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca.crt new file mode 100644 index 000000000000..c89d6531c948 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUWzsBehxAkgLLYBUZEUpSjHkIaMowDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy 
+MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCxjHVNtcCSCz1w9AiN7zAql0ZsPN6MNQWJ2j3iPCvmy9oi0wqSfYXTs+xw +Y4Q+j0dfA54+PcyIOSBQCZBeLLIwCaXN+gLkMxYEWCCVgWYUa6UY+NzPKRCfkbwG +oE2Ilv3R1FWIpMqDVE2rLmTb3YxSiw460Ruv4l16kodEzfs4BRcqrEiobBwaIMLd +0rDJju7Q2TcioNji+HFoXV2aLN58LDgKO9AqszXxW88IKwUspfGBcsA4Zti/OHr+ +W+i/VxsxnQSJiAoKYbv9SkS8fUWw2hQ9SBBCKqE3jLzI71HzKgjS5TiQVZJaD6oK +cw8FjexOELZd4r1+/p+nQdKqwnb5AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRLfPxmhlZix1eTdBMAzMVlAnOV +gTANBgkqhkiG9w0BAQsFAAOCAQEAeT2NfOt3WsBLUVcnyGMeVRQ0gXazxJXD/Z+3 +2RF3KClqBLuGmPUZVl0FU841J6hLlwNjS33mye7k2OHrjJcouElbV3Olxsgh/EV0 +J7b7Wf4zWYHFNZz/VxwGHunsEZ+SCXUzU8OiMrEcHkOVzhtbC2veVPJzrESqd88z +m1MseGW636VIcrg4fYRS9EebRPFvlwfymMd+bqLky9KsUbjNupYd/TlhpAudrIzA +wO9ZUDb/0P44iOo+xURCoodxDTM0vvfZ8eJ6VZ/17HIf/a71kvk1oMqEhf060nmF +IxnbK6iUqqhV8DLE1869vpFvgbDdOxP7BeabN5FXEnZFDTLDqg== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! 
which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/run.sh new file mode 100755 index 000000000000..13e16bda9951 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/run.sh @@ -0,0 +1,33 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs-wildcard/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --discovery-srv etcd.local \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --discovery-srv etcd.local \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --discovery-srv etcd.local \ + get abc diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server-ca-csr.json new file mode 100644 index 000000000000..fd9adae03eb6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server-ca-csr.json @@ -0,0 +1,21 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "*.etcd.local", + "etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.crt new file mode 100644 index 000000000000..385f0321ca80 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFjCCAv6gAwIBAgIUCIUuNuEPRjp/EeDBNHipRI/qoAcwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzoOebyKdXF +5QiVs0mB3cVqMRgRoRGWt9emIOsYCX89SBaRNOIAByop98Vb1GmUDNDv1qR4Oq+m +4JlWhgZniABWpekFw8mpN8wMIT86DoNnTe64ouLkDQRZDYOBO9I2+r4EuschRxNs ++Hh5W9JzX/eOomnOhaZfTp6EaxczRHnVmgkWuFUnacfUf7W2FE/HAYfjYpvXw5/+ +eT9AW+Jg/b9SkyU9XKEpWZT7NMqF9OXDXYdxHtRNTGxasLEqPZnG58mqR2QFU2me +/motY24faZpHo8i9ASb03Vy6xee2/FlS6cj2POCGQx3oLZsiQdgIOva7JrQtRsCn 
+e5P0Wk4qk+cCAwEAAaOBtjCBszAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCI+fP2T +xgvJG68Xdgamg4lzGRX1MB8GA1UdIwQYMBaAFEt8/GaGVmLHV5N0EwDMxWUCc5WB +MDQGA1UdEQQtMCuCDCouZXRjZC5sb2NhbIIKZXRjZC5sb2NhbIIJbG9jYWxob3N0 +hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQASub3+YZAXJ8x8b55Hl7FkkIt+rML1 +LdgPHsolNntNXeSqVJ4oi4KvuaM0ueFf/+AlTusTAbXWbi/qiG5Tw24xyzY6NGgV +/vCs56YqNlFyr3bNp1QJlnV3JQ4d3KqosulJ5jk+InhjAKJKomMH01pYhhStRAKg +1fNwSyD34oyZpSQL0Z7X7wdaMGdOmzxwE99EG6jmYl/P7MiP6rC0WP1elIF4sCGM +jY6oewvIMj0zWloBf/NlzrcY7VKpPqvBnV65Tllyo5n4y1sc8y2uzgJO/QnVKqhp +Sdd/74mU8dSh3ALSOqkbmIBhqig21jP7GBgNCNdmsaR2LvPI97n1PYE7 +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.key.insecure new file mode 100644 index 000000000000..2b6595fa880d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs-wildcard/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzOg55vIp1cXlCJWzSYHdxWoxGBGhEZa316Yg6xgJfz1IFpE0 +4gAHKin3xVvUaZQM0O/WpHg6r6bgmVaGBmeIAFal6QXDyak3zAwhPzoOg2dN7rii +4uQNBFkNg4E70jb6vgS6xyFHE2z4eHlb0nNf946iac6Fpl9OnoRrFzNEedWaCRa4 +VSdpx9R/tbYUT8cBh+Nim9fDn/55P0Bb4mD9v1KTJT1coSlZlPs0yoX05cNdh3Ee +1E1MbFqwsSo9mcbnyapHZAVTaZ7+ai1jbh9pmkejyL0BJvTdXLrF57b8WVLpyPY8 +4IZDHegtmyJB2Ag69rsmtC1GwKd7k/RaTiqT5wIDAQABAoIBAF0nTfuCKCa5WtA2 +TlWippGzHzKUASef32A4dEqsmNSxpW4tAV+lJ5yxi6S7hKui1Ni/0FLhHbzxHrZX +MYMD2j5dJfvz1Ph+55DqCstVt3dhpXpbkiGYD5rkaVJZlDqTKBbuy4LvzAI2zhbn +BSl9rik7PPbhHr1uIq3KAW2Arya7dlpPZiEX04Dg9xqZvxZkxt9IM25E+uzTWKSR +v5BRmijWiGJ6atujgmP7KcYtgBC5EDR9yZf2uK+hnsKEcH94TUkTnJriTcOCKVbb +isAuzsxStLpmyibfiLXD55aYjzr7KRVzQpoVXGJ4vJfs7lTxqxXBjUIsBJMPBcck +ATabIcECgYEA8C8JeKPmcA4KaGFSusF5OsXt4SM9jz5Kr7larA+ozuuR/z0m4pnx +AdjwQiGlhXaMtyziZ7Uwx+tmfnJDijpE/hUnkcAIKheDLXB/r1VpJdj/mqXtK49Y +mnOxV66TcWAmXav31TgmLVSj0SYLGEnvV4MPbgJroMg3VO7LnNbNL7cCgYEA2maB +Edbn4pJqUjVCZG68m0wQHmFZFOaoYZLeR3FgH+PQYIzUj96TP9XFpOwBmYAl2jiM +kQZ3Q6VQY37rwu0M+2BVFkQFnFbelH5jXbHDLdoeFDGCRnJkH2VG1kE/rPfzVsiz +NFDJD+17kPw3tTdHwDYGHwxyNuEoBQw3q6hfXVECgYBEUfzttiGMalIHkveHbBVh +5H9f9ThDkMKJ7b2fB+1KvrOO2QRAnO1zSxQ8m3mL10b7q+bS/TVdCNbkzPftT9nk +NHxG90rbPkjwGfoYE8GPJITApsYqB+J6PMKLYHtMWr9PEeWzXv9tEZBvo9SwGgfc +6sjuz/1xhMJIhIyilm9TTQKBgHRsYDGaVlK5qmPYcGQJhBFlItKPImW579jT6ho7 +nfph/xr49/cZt3U4B/w6sz+YyJTjwEsvHzS4U3o2lod6xojaeYE9EaCdzllqZp3z +vRAcThyFp+TV5fm2i2R7s+4I33dL1fv1dLlA57YKPcgkh+M26Vxzzg7jR+oo8SRY +xT2BAoGBAKNR60zpSQZ2SuqEoWcj1Nf+KloZv2tZcnsHhqhiugbYhZOQVyTCNipa +Ib3/BGERCyI7oWMk0yTTQK4wg3+0EsxQX10hYJ5+rd4btWac7G/tjo2+BSaTnWSW +0vWM/nu33Pq0JHYIo0q0Jee0evTgizqH9UJ3wI5LG29LKwurXxPW +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/Procfile new file mode 100644 index 000000000000..0b54c56650a4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/Procfile @@ -0,0 +1,5 @@ +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt 
--peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca.crt new file mode 100644 index 000000000000..b853bf4ccf46 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUfPEaJnrBzeHM8echLjsPOsV1IzUwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMjIxNzMzMDBaFw0yNzExMjAxNzMz +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDDU14WMuV1AC+6wDWRF6itx71EljW7Prw2drhuxOC3bE+QQx4LGcY2OP9N +9MC9u9M0s8waGDAbZvdLmCMfAAJoJ05rLcO7F2XEr7Ww7jUWl7+B/sW8ENQiqtUY +1JqLVjwducxmfHspAmSkhEpDBTiTFsya/i1Ic+ctfxDLtsNGgQuA9mCiBvuUhbWG +CkB0JpuL4s6LMuDukQHpZZCDnq0Y26M9sZnjmowbdRoQlhVId6Tl5b5b4Y3qLLbe +r1E+VChcPpOYrKhXBOW/dT5ph/fIQDuVKN6E5Z54AMm3fKsP3MLGBCMfFqIVg1+s +BZA5/Jau+US8Ll4bn8sy/HK1xoy/AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSZZ+PEsPywCRKo/fxY2eSnI0wQ +IDANBgkqhkiG9w0BAQsFAAOCAQEAFU4QXMGx8zr8rKAp/IyGipDp/aQ49qYXPjIt +c92rzbYo11sJmBEXiYIOGuZdBBeawIzYsM8dW59LFO8ZcMq/gISBcS5ilqllw6SG +20UrFEKNzcPoRwXp3GSbSGr5PxTgWYWpwJaDa0j2qiM4PB9/IuTBqr6Vu1Olhx06 +mXztYl4UL0HPkuB4Td+BIhjc+ZpxCfBOOBpiwAyeh4SpJ3cpZrbyz7JAsCTtywzy +lVO4lfcmxTWwruRyYAnexHdBvnqa8GZw1gufZoSbMTsN4Zz/j3j9T2LG1Q0Agi7o 
+MhqPqhG/9ISjA0G3bu2B/jHbmWMVbb+ueEYtAz5JHFik2snRtA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/run.sh new file mode 100755 index 000000000000..066c9df672d0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/run.sh @@ -0,0 +1,33 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --discovery-srv etcd.local \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --discovery-srv etcd.local \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --discovery-srv etcd.local \ + get abc diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server-ca-csr.json new file mode 100644 index 000000000000..72bd3808288a --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server-ca-csr.json @@ -0,0 +1,23 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "m1.etcd.local", + "m2.etcd.local", + "m3.etcd.local", + "etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.crt new file mode 100644 index 000000000000..938fcf8f8a1b --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIENTCCAx2gAwIBAgIUPr4J62m04v7Sr5rFop1P0+VbN+8wDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMjIxNzMzMDBaFw0yNzExMjAxNzMz +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOZuU1wqUMoI +/Vkxo5ep8vGxgCg38c0PdxAJX4ViEBRIsKxnjMUmgMWEes9bJ14wrqQ2G3l0tSSr +nOtRPRGeSBAsiFKU41sRdHZQgZKhWXKvOqLlll9tgTmAypXeYt1zrtV8zPan3AWn +OYz+FdO41BESmg00SctcIVoP57keSkr/binJuwy+e1w6Z8Prnoc+OqsFvjp6RPNH +ZJYKsBziYVldg3RN0K/1MQBP587AhF0Dh+iTqnMWhJwbAGw82j7b7jgJnatMvj0L +e/nunxB9BgWaRl4Xq0WueFBfVSLIYUspTogpaz2bUsIAxV3xbRRbpiFY/eqT6nSK +grR6Qc8oOVsCAwEAAaOB1TCB0jAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFE4dpGTp ++hE0TR9Ku1wf1/GQ9zVjMB8GA1UdIwQYMBaAFJln48Sw/LAJEqj9/FjZ5KcjTBAg +MFMGA1UdEQRMMEqCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 +Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B +AQsFAAOCAQEADDh4aThZsXaXkAluZP1yC+gc+z+gJT88SeBgIX11++3SqzERCcWv +71boMeYGDa/TuvDtAXQcZAtfNdjcZCxPGPoDuOYMksEMk/+oekb8JR1Nfd9jgRr+ +0MD2Hh6ElM9F/FXO+NHavAbtbTjbEGXGXCciGqL/fPw4AF0bAIQjiIE69wiZgCfM +1/+wR2+paZ+CxE3QZZKUhgoDRPY91J8KCiDPHvZRafQEulzb8w4G7h8TUy1xjZPw +UQfHsquLQHIfCHVHSn2yubMrlMbdJPhnJT35APBa7Uj0TYwb1tuFQ/xbO2GKoq3f +T7Rad1T50qRTqsRZzPdG4lZjAgnybjJUIQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.key.insecure new file mode 100644 index 000000000000..73cb4aa07400 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/certs/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA5m5TXCpQygj9WTGjl6ny8bGAKDfxzQ93EAlfhWIQFEiwrGeM +xSaAxYR6z1snXjCupDYbeXS1JKuc61E9EZ5IECyIUpTjWxF0dlCBkqFZcq86ouWW +X22BOYDKld5i3XOu1XzM9qfcBac5jP4V07jUERKaDTRJy1whWg/nuR5KSv9uKcm7 +DL57XDpnw+uehz46qwW+OnpE80dklgqwHOJhWV2DdE3Qr/UxAE/nzsCEXQOH6JOq +cxaEnBsAbDzaPtvuOAmdq0y+PQt7+e6fEH0GBZpGXherRa54UF9VIshhSylOiClr +PZtSwgDFXfFtFFumIVj96pPqdIqCtHpBzyg5WwIDAQABAoIBAQDBdpk4RTLFHV0P +uLRfzkjxkRRHMAksIDLXXPc8tkNHtGvYo6u1jokIzByL4T0hQIAv0Fmq1EiNfCPo +EbHTC+/23Fyr8OMdf38nIppW8G538hSp1VY10mtvSulLgIXC5bBA/2HaKL56ZJbW +ADF1K7Woi9SZB3B5c2VxBu+HJZ48bbZLFoKMw+48998K/S0Msh4NeZ3Lq75i2LmZ +GhPmeR2d922UAO72hgP8h771Cejz3bd0mdFGtbwSS+1vpseFsZHu8yQjBAbP13o2 +e6+SpZf7Yndeg1Wv/WALiKFFTIfqnpVtVhMqD+nx/0DweW1b1vdDVz+LmPPUyvxR +owhQV9b5AoGBAPNPSgMxlMvsaoTo08AU6YjZgfqMAxJNgVU/KsyK9qhq/O9Q9O8d +OKt/kehdeYQOkkM77mLTtcDlFfbg6NmNnN7iBMY9v5iZP8U14avjmvjDKrwigsK+ +HWuFlA7RpmecIwHH17ya32PydnoM7MMH46N28fSnAR7bIgZC3USmUfYtAoGBAPJz +E8Gcf9eVox5o5hhhocLtjFQcXxjcL3Bxz1qFPNvQ440s/7ubGORPoDzOf1lPyxI9 +HewZTJ/aP8lyhPwGC0+O3mH6Gwr2YflaoLdZxBAX0gliPKI0OWsH73RGkBxUte46 +ugTgKXpwtvM9R7pENJbP8lOFKdg5EoA6ZjIKCmqnAoGBAMMXT4wyBFJi9aIuoiNB +YWQmq47/FzNkzBBTfvjVcCPo7Xji3BKixp7UwmSkFtxpZqPceS/q+7B4v9zdyDcw +0pjwd82RE4DDWJvDsXjHHqraqviBX4HROPvO9sHPHvOzAWrbF8QWFosojhEdLfbP +65pVtHpsMnzQTn7gvFTgW5XdAoGAepDYfPlL28Wm99mZ8NtydmO2nFLXdG7jgJnY +dG+E6683SghkpAftVoY2gGb4FEN1apwBA3lqtikUNBezyOCZWTfljmxsvWb+8prx +Qp+bsXMJWHsUIf/6wvP5BrQhaGEes/d2UL6t2Vsf8emZ2D1gxJkNbVGVbNy1UKO1 
+RDi1OWMCgYB+DZ/CvJ8i6VwzOm/SXtycuDJZ96NGwjpK4A71HoocrVi1phGMlOp+ +c48XR0Xr2/AEfFsmcTIilI2ShsjN4u9YDXJK8Efek2EX77pP6MsUXuSZ6i1OS9wP +5WPYypGxNXsZU99D78UBV9PohWqp4LkBSP/55sFBcd3iyLbdHlthLA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/etcd.zone b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/etcd.zone new file mode 100644 index 000000000000..b9cebbba4c39 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/etcd.zone @@ -0,0 +1,21 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +ns IN A 127.0.0.1 +m1 IN A 127.0.0.1 +m2 IN A 127.0.0.1 +m3 IN A 127.0.0.1 + +_etcd-client-ssl._tcp IN SRV 0 0 2379 m1.etcd.local. +_etcd-server-ssl._tcp IN SRV 0 0 2380 m1.etcd.local. +_etcd-client-ssl._tcp IN SRV 0 0 22379 m2.etcd.local. +_etcd-server-ssl._tcp IN SRV 0 0 22380 m2.etcd.local. +_etcd-client-ssl._tcp IN SRV 0 0 32379 m3.etcd.local. +_etcd-server-ssl._tcp IN SRV 0 0 32380 m3.etcd.local. diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/named.conf b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/named.conf new file mode 100644 index 000000000000..83549305c34f --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/named.conf @@ -0,0 +1,23 @@ +options { + directory "/var/bind"; + listen-on { 127.0.0.1; }; + listen-on-v6 { none; }; + allow-transfer { + none; + }; + // If you have problems and are behind a firewall: + query-source address * port 53; + pid-file "/var/run/named/named.pid"; + allow-recursion { none; }; + recursion no; +}; + +zone "etcd.local" IN { + type master; + file "/etc/bind/etcd.zone"; +}; + +zone "0.0.127.in-addr.arpa" { + type master; + file "/etc/bind/rdns.zone"; +}; diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/rdns.zone b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/rdns.zone new file mode 100644 index 000000000000..fb71b30b1fad --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/rdns.zone @@ -0,0 +1,13 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +1 IN PTR m1.etcd.local. +1 IN PTR m2.etcd.local. +1 IN PTR m3.etcd.local. 
diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/resolv.conf b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/resolv.conf new file mode 100644 index 000000000000..bbc8559cd54f --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns-srv/resolv.conf @@ -0,0 +1 @@ +nameserver 127.0.0.1 diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/Dockerfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/Dockerfile new file mode 100644 index 000000000000..07e907214420 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/Dockerfile @@ -0,0 +1,44 @@ +FROM ubuntu:16.10 + +RUN rm /bin/sh && ln -s /bin/bash /bin/sh +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +RUN apt-get -y update \ + && apt-get -y install \ + build-essential \ + gcc \ + apt-utils \ + pkg-config \ + software-properties-common \ + apt-transport-https \ + libssl-dev \ + sudo \ + bash \ + curl \ + tar \ + git \ + netcat \ + bind9 \ + dnsutils \ + && apt-get -y update \ + && apt-get -y upgrade \ + && apt-get -y autoremove \ + && apt-get -y autoclean + +ENV GOROOT /usr/local/go +ENV GOPATH /go +ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} +ENV GO_VERSION REPLACE_ME_GO_VERSION +ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang +RUN rm -rf ${GOROOT} \ + && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ + && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ + && go version \ + && go get -v -u github.com/mattn/goreman + +RUN mkdir -p /var/bind /etc/bind +RUN chown root:bind /var/bind /etc/bind + +ADD named.conf etcd.zone rdns.zone /etc/bind/ +RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone +ADD resolv.conf /etc/resolv.conf diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/Procfile new file mode 100644 index 000000000000..a0ea061acd03 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/Procfile @@ -0,0 +1,6 @@ +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name/server.crt --peer-key-file=/certs-common-name/server.key.insecure --peer-trusted-ca-file=/certs-common-name/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name/server.crt --key-file=/certs-common-name/server.key.insecure --trusted-ca-file=/certs-common-name/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name/server.crt --peer-key-file=/certs-common-name/server.key.insecure 
--peer-trusted-ca-file=/certs-common-name/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name/server.crt --key-file=/certs-common-name/server.key.insecure --trusted-ca-file=/certs-common-name/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name/server.crt --peer-key-file=/certs-common-name/server.key.insecure --peer-trusted-ca-file=/certs-common-name/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name/server.crt --key-file=/certs-common-name/server.key.insecure --trusted-ca-file=/certs-common-name/ca.crt --client-cert-auth \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca.crt new file mode 100644 index 000000000000..00faeca22a55 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUdASu5zT1US/6LPyKmczbC3NgdY4wDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDBbE44RP/Tk9l7KShzxQAypatoqDJQL32hyw8plZIfni5XFIlG2GwyjNvX +wiP6u0YcsApZKc58ytqcHQqMyk68OTTxcM+HVWvKHMKOBPBYgXeeVnD+7Ixuinq/ +X6RK3n2jEipFgE9FiAXDNICF3ZQz+HVNBSbzwCjBtIcYkinWHX+kgnQkFT1NnmuZ +uloz6Uh7/Ngn/XPNSsoMyLrh4TwDsx/fQEpVcrXMbxWux1xEHmfDzRKvE7VhSo39 +/mcpKBOwTg4jwh9tDjxWX4Yat+/cX0cGxQ7JSrdy14ESV5AGBmesGHd2SoWhZK9l +tWm1Eq0JYWD+Cd5yNrODTUxWRNs9AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSZMjlLnc7Vv2mxRMebo5ezJ7gt +pzANBgkqhkiG9w0BAQsFAAOCAQEAA2d2nV4CXjp7xpTQrh8sHzSBDYUNr9DY5hej +52X6q8WV0N3QC7Utvv2Soz6Ol72/xoGajIJvqorsIBB5Ms3dgCzPMy3R01Eb3MzI +7KG/4AGVEiAKUBkNSD8PWD7bREnnv1g9tUftE7jWsgMaPIpi6KhzhyJsClT4UsKQ +6Lp+Be80S293LrlmUSdZ/v7FAvMzDGOLd2iTlTr1fXK6YJJEXpk3+HIi8nbUPvYQ +6O8iOtf5QoCm1yMLJQMFvNr51Z1EeF935HRj8U2MJP5jXPW4/UY2TAUBcWEhlNsK +6od+f1B8xGe/6KHvF0C8bg23kj8QphM/E7HCZiVgdm6FNf54AQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencert.json 
b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/run.sh new file mode 100755 index 000000000000..6d3bb026f70e --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/run.sh @@ -0,0 +1,255 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs-common-name/Procfile start & +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379 \ + endpoint health --cluster + +sleep 2s +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put abc def + +sleep 2s +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + get abc + +sleep 1s && printf "\n" +echo "Step 1. creating root role" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + role add root + +sleep 1s && printf "\n" +echo "Step 2. granting readwrite 'foo' permission to role 'root'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + role grant-permission root readwrite foo + +sleep 1s && printf "\n" +echo "Step 3. 
getting role 'root'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + role get root + +sleep 1s && printf "\n" +echo "Step 4. creating user 'root'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --interactive=false \ + user add root:123 + +sleep 1s && printf "\n" +echo "Step 5. granting role 'root' to user 'root'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + user grant-role root root + +sleep 1s && printf "\n" +echo "Step 6. getting user 'root'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + user get root + +sleep 1s && printf "\n" +echo "Step 7. enabling auth" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + auth enable + +sleep 1s && printf "\n" +echo "Step 8. writing 'foo' with 'root:123'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + put foo bar + +sleep 1s && printf "\n" +echo "Step 9. writing 'aaa' with 'root:123'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + put aaa bbb + +sleep 1s && printf "\n" +echo "Step 10. writing 'foo' without 'root:123'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put foo bar + +sleep 1s && printf "\n" +echo "Step 11. reading 'foo' with 'root:123'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + get foo + +sleep 1s && printf "\n" +echo "Step 12. reading 'aaa' with 'root:123'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + get aaa + +sleep 1s && printf "\n" +echo "Step 13. 
creating a new user 'test-common-name:test-pass'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + --interactive=false \ + user add test-common-name:test-pass + +sleep 1s && printf "\n" +echo "Step 14. creating a role 'test-role'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + role add test-role + +sleep 1s && printf "\n" +echo "Step 15. granting readwrite 'aaa' --prefix permission to role 'test-role'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + role grant-permission test-role readwrite aaa --prefix + +sleep 1s && printf "\n" +echo "Step 16. getting role 'test-role'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + role get test-role + +sleep 1s && printf "\n" +echo "Step 17. granting role 'test-role' to user 'test-common-name'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=root:123 \ + user grant-role test-common-name test-role + +sleep 1s && printf "\n" +echo "Step 18. writing 'aaa' with 'test-common-name:test-pass'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=test-common-name:test-pass \ + put aaa bbb + +sleep 1s && printf "\n" +echo "Step 19. writing 'bbb' with 'test-common-name:test-pass'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=test-common-name:test-pass \ + put bbb bbb + +sleep 1s && printf "\n" +echo "Step 20. reading 'aaa' with 'test-common-name:test-pass'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=test-common-name:test-pass \ + get aaa + +sleep 1s && printf "\n" +echo "Step 21. 
reading 'bbb' with 'test-common-name:test-pass'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + --user=test-common-name:test-pass \ + get bbb + +sleep 1s && printf "\n" +echo "Step 22. writing 'aaa' with CommonName 'test-common-name'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put aaa ccc + +sleep 1s && printf "\n" +echo "Step 23. reading 'aaa' with CommonName 'test-common-name'" +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-common-name/ca.crt \ + --cert=/certs-common-name/server.crt \ + --key=/certs-common-name/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + get aaa diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server-ca-csr.json new file mode 100644 index 000000000000..6a57789b1abb --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server-ca-csr.json @@ -0,0 +1,23 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "test-common-name", + "hosts": [ + "m1.etcd.local", + "m2.etcd.local", + "m3.etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.crt new file mode 100644 index 000000000000..b9719b2f013d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIERDCCAyygAwIBAgIUO500NxhwBHJsodbGKbo5NsW9/p8wDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz +MDBaMH0xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTEZMBcGA1UEAxMQdGVzdC1jb21tb24tbmFtZTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAMRvVMj3+5jAhRng4izVm4zrvMBnHNMh2MOFVTp7 +wdhEF2en7pFsKzWgczewil6v4d6QzJpgB9yQzPT2q0SOvetpbqP950y6MdPHAF9D +qZd0+wC+RLdSmK5oQKzgZER/vH3eSbTa1UdwaLBHlT6PiTzGm+gEYL43gr3kle+A +9c7aT9pkJWQFTCSdqwcQopyHEwgrfPHC8Bdn804soG4HtR9Gg/R4xtlu7ir6LTHn +vpPBScaMZDUQ5UNrEMh8TM8/sXG6oxqo86r5wpVQt6vscnTMrTTUqq+Mo/OJnDAf +plaqkWX5NfIJ9tmE2V06hq1/ptQkl714Wb+ske+aJ2Poc/UCAwEAAaOByTCBxjAO +BgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwG +A1UdEwEB/wQCMAAwHQYDVR0OBBYEFEG2hXyVTpxLXTse3fXe0U/g0F8kMB8GA1Ud +IwQYMBaAFJkyOUudztW/abFEx5ujl7MnuC2nMEcGA1UdEQRAMD6CDW0xLmV0Y2Qu +bG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcE +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEADtH0NZBrWfXTUvTa3WDsa/JPBhiPu/kH ++gRxOD5UNeDX9+QAx/gxGHrCh4j51OUx55KylUe0qAPHHZ4vhgD2lCRBqFLYx69m +xRIzVnt5NCruriskxId1aFTZ5pln5KK5tTVkAp04MBHZOgv8giXdRWn+7TtMyJxj +wVGf8R7/bwJGPPJFrLNtN4EWwXv/a2/SEoZd8fkTxzw12TeJ8w1PnkH4Zer+nzNb 
+dH5f+OIBGGZ2fIWANX5g9JEJvvsxBBL8uoCrFE/YdnD0fLyhoplSOVEIvncQLHd8 +3QoIVQ5GXnreMF9vuuEU5LlSsqd/Zv5mAQNrbEAfAL+QZQsnHY12qQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.key.insecure new file mode 100644 index 000000000000..07417b2552c6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-common-name/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAxG9UyPf7mMCFGeDiLNWbjOu8wGcc0yHYw4VVOnvB2EQXZ6fu +kWwrNaBzN7CKXq/h3pDMmmAH3JDM9ParRI6962luo/3nTLox08cAX0Opl3T7AL5E +t1KYrmhArOBkRH+8fd5JtNrVR3BosEeVPo+JPMab6ARgvjeCveSV74D1ztpP2mQl +ZAVMJJ2rBxCinIcTCCt88cLwF2fzTiygbge1H0aD9HjG2W7uKvotMee+k8FJxoxk +NRDlQ2sQyHxMzz+xcbqjGqjzqvnClVC3q+xydMytNNSqr4yj84mcMB+mVqqRZfk1 +8gn22YTZXTqGrX+m1CSXvXhZv6yR75onY+hz9QIDAQABAoIBABiq+nS6X4gRNSXI +zd5ffMc3m152FHKXH4d+KPPNMsyb0Gyd9CGi+dIkMhPeQaIeaDjw6iDAynvyWyqw +B1X2rvbvKIvDiNZj03oK1YshDh0M/bBcNHjpEG9mfCi5jR3lBKCx14O0r2/nN95b +Puy6TbuqHU4HrrZ0diCuof2Prk6pd0EhQC+C3bZCcoWXOaRTqrMBTT6DdSMQrVKD +eGTXYqCzs/AlGKkOiErKtKWouNpkPpPiba1qp7YWXUasrXqPgPi4d97TmOShGIfc +zXNJT+e2rDX4OEVAJtOt6U2l9QG+PIhpH4P/ZYsvindm4VZBs+Vysrj4xkLgGBBP +ygOfBIECgYEA0IfP9Z9mzvCXiGrkrx2tN/k31cX674P/KwxPgSWM/AdXenYYzsmj +rVcoFx2eCFnBFdPz4BAqEfH70gtsG7OoTmoJSwN6wurIdGcFQwItrghgt9Qp46Dq +AIT9RXSpcB9AjM6p2reCjWcNeBVMrrHU3eaQitCxZbzuxvMMhMs/zzECgYEA8Sak +UhXFtNjxBW6EMNmTpjhShIZmxtPNzTJ5DtmARr8F+SMELp3JGJj/9Bm4TsvqJmGs +j9g/MVvSTjJlOuYPGJ5DBl3egZ5ZlRJx3I2qA4lFFCb71OJzuoR8YdHRlHnhJOu9 +2Jyrki1wrAefby8Fe/+5vswxq2u+Qurjya716AUCgYB+E06ZGzmmLfH/6Vi/wzqC +F+w5FAzGGNECbtv2ogReL/YktRgElgaee45ig2aTd+h0UQQmWL+Gv/3XHU7MZM+C +MTvTHZRwGlD9h3e37q49hRUsr1pwJE6157HU91al0k9NknlBIigNY9vR2VbWW+/u +BUMomkpWz2ax5CqScuvuUQKBgQCE+zYqPe9kpy1iPWuQNKuDQhPfGO6cPjiDK44u +biqa2MRGetTXkBNRCS48QeKtMS3SNJKgUDOo2GXE0W2ZaTxx6vQzEpidCeGEn0NC +yKw0fwIk9spwvt/qvxyIJNhZ9Ev/vDBYvyyt03kKpLl66ocvtfmMCbZqPWQSKs2q +bl0UsQKBgQDDrsPnuVQiv6l0J9VrZc0f5DYZIJmQij1Rcg/fL1Dv2mEpADrH2hkY +HI27Q15dfgvccAGbGXbZt3xi7TCLDDm+Kl9V9bR2e2EhqA84tFryiBZ5XSDRAWPU +UIjejblTgtzrTqUd75XUkNoKvJIGrLApmQiBJRQbcbwtmt2pWbziyQ== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/Procfile new file mode 100644 index 000000000000..8e9a22c576ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/Procfile @@ -0,0 +1,8 @@ +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls 
https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth + +gateway: ./etcd gateway start --endpoints https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790 diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca.crt new file mode 100644 index 000000000000..7e3814e92d64 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUClliB9ECLPuQpOrlqLkeI1ib7zYwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3 +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCjClF0TCk2qrHUTjFgFv2jmV0yUqnP3SG/7eVCptcFKE7kcGAx+j06GfEP +UXmCV13cgE0dYYLtz7/g29BiZzlBLlLsmpBMM+S4nfVH9BGLbKCSnwp5ba816AuS +rc8+qmJ0fAo56snLQWoAlnZxZ1tVjAtj5ZrQP9QDK2djgyviPS4kqWQ7Ulbeqgs7 +rGz56xAsyMTWYlotgZTnnZ3Pckr1FHXhwkO1rFK5+oMZPh2HhvXL9wv0/TMAypUv +oQqDzUfUvYeaKr6qy1ADc53SQjqeTXg0jOShmnWM2zC7MwX+VPh+6ZApk3NLXwgv +6wT0U1tNfvctp8JvC7FqqCEny9hdAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQWI6eUGqKWkCjOKGAYd+5K6eh5 +GTANBgkqhkiG9w0BAQsFAAOCAQEAS3nIyLoGMsioLb89T1KMq+0NDDCx7R20EguT +qUvFUYKjzdxDA1RlZ2HzPxBJRwBc0Vf98pNtkWCkwUl5hxthndNQo7F9lLs/zNzp +bL4agho6kadIbcb4v/3g9XPSzqJ/ysfrwxZoBd7D+0PVGJjRTIJiN83Kt68IMx2b 
+8mFEBiMZiSJW+sRuKXMSJsubJE3QRn862y2ktq/lEJyYR6zC0MOeYR6BPIs/B6vU +8/iUbyk5ULc7NzWGytC+QKC3O9RTuA8MGF1aFaNSK7wDyrAlBZdxjWi52Mz3lJCK +ffBaVfvG55WKjwAqgNU17jK/Rxw1ev9mp4aCkXkD0KUTGLcoZw== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/run.sh new file mode 100755 index 000000000000..94fdc32eefdc --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/run.sh @@ -0,0 +1,47 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs-gateway/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379 \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + get abc + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=127.0.0.1:23790 \ + put ghi jkl + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-gateway/ca.crt \ + --cert=/certs-gateway/server.crt \ + --key=/certs-gateway/server.key.insecure \ + --endpoints=127.0.0.1:23790 \ + get ghi diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server-ca-csr.json 
b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server-ca-csr.json new file mode 100644 index 000000000000..77cdb408cf07 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server-ca-csr.json @@ -0,0 +1,22 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "m1.etcd.local", + "m2.etcd.local", + "m3.etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.crt new file mode 100644 index 000000000000..688a5afe6418 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEKTCCAxGgAwIBAgIUDOkW+H3KLeHEwsovqOUMKKfEuqQwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3 +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANfu298kCxFY +KXAmdG5BeqnFoezAJQCtgv+ZRS0+OB4hVsahnNSsztEfIJnVSvYJTr1u+TGSbzBZ +q85ua3S92Mzo/71yoDlFjj1JfBmPdL1Ij1256LAwUYoPXgcACyiKpI1DnTlhwTvU +G41teQBo+u4sxr9beuNpLlehVbknH9JkTNaTbF9/B5hy5hQPomGvzPzzBNAfrb2B +EyqabnzoX4qv6cMsQSJrcOYQ8znnTPWa5WFP8rWujsvxOUjxikQn8d7lkzy+PHwq +zx69L9VzdoWyJgQ3m73SIMTgP+HL+OsxDfmbu++Ds+2i2Dgf/vdJku/rP+Wka7vn +yCM807xi96kCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFAH+dsuv +L6qvUmB/w9eKl83+MGTtMB8GA1UdIwQYMBaAFBYjp5QaopaQKM4oYBh37krp6HkZ +MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 +Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAh049 +srxFkiH9Lp8le3fJkuY25T/MUrmfa10RdNSKgj3qcpCMnf9nQjIWtaQsjoZJ5MQc +VIT3gWDWK8SWlpx+O2cVEQDG0ccv7gc38YGywVhMoQ5HthTAjLCbNk4TdKJOIk7D +hmfs7BHDvjRPi38CFklLzdUQaVCcvB43TNA3Y9M75oP/UGOSe3lJz1KKXOI/t+vA +5U3yxwXlVNJVsZgeWAbXN9F6WbCZDsz+4Obpk/LV1NLqgLd/hHXzoOOWNw977S2b ++dOd95OJ/cq09OzKn/g26NgtHOl0xqol7wIwqJhweEEiVueyFxXD04jcsxdAFZSJ +9H6q3inNQaLyJHSYWQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.key.insecure new file mode 100644 index 000000000000..6c0c16c0ba74 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-gateway/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA1+7b3yQLEVgpcCZ0bkF6qcWh7MAlAK2C/5lFLT44HiFWxqGc +1KzO0R8gmdVK9glOvW75MZJvMFmrzm5rdL3YzOj/vXKgOUWOPUl8GY90vUiPXbno +sDBRig9eBwALKIqkjUOdOWHBO9QbjW15AGj67izGv1t642kuV6FVuScf0mRM1pNs +X38HmHLmFA+iYa/M/PME0B+tvYETKppufOhfiq/pwyxBImtw5hDzOedM9ZrlYU/y +ta6Oy/E5SPGKRCfx3uWTPL48fCrPHr0v1XN2hbImBDebvdIgxOA/4cv46zEN+Zu7 +74Oz7aLYOB/+90mS7+s/5aRru+fIIzzTvGL3qQIDAQABAoIBABO8azA79R8Ctdbg +TOf+6B04SRKAhWFIep6t/ZqjAzINzgadot31ZXnLpIkq640NULsTt4cGYU9EAuX9 +RakH6RbhfO5t2aMiblu/qa4UZJEgXqosYc4ovGsn+GofYOW1tlCLC4XBH44+Vr5Y +cSTOc5DtWsUGsXazmF6+Cj3AC7KI+VWegHexGezyO0not8Q5L55TuH2lCW4sx9th +W4Q7jg2lrCvz4x8ZRIAXOGmBaDTZmMtVlEjezu+7xr8QDQsvUwj7a87HPjgXFesj 
+CbbCr8kaqEdZ23AVDZuLAKS4hWQlbacRhRAxMkomZkg5U6J/PC3ikIqfOda1zu1D +MTIOuwECgYEA8hFkISWVEzbaIZgO1BZl36wNaOLYIpX0CzlycptcEssbefLy7Nxo +TZ+m9AjF6TBPl4fO4edo00iiJMy6ZdhItduNWLO+usJEY9UdzHex7fCUeG8usUXQ +g4VGEvPGg88VEM45pkAgbga7kzkG2Ihfu6La5apbXeOpNpuC58DdlzkCgYEA5Fxl +/qGzLlTwioaaE+qpEX46MfbJl38nkeSf9B7J1ISc/fnDPcBPvcHaYELqyHM+7OFa +Gt9oBDrLgyP4ZgOTaHKHdofXjAMC97b9oa/Lrors5dMrf/fxTTe2X+Kab94E1Wbo +39kA3qzV/CT7EZWuqbHO3Bqkv/qe6ks0Tbahc/ECgYBuB2OpAWkyc6NQ08ohsxCZ +S55Ix5uQlPJ5y6Hu4BlI3ZNeqgSrjz/F0MTVdctnxDLZYLyzyDjImOJCseAj/NyH +9QTZhdIzF6x4aF2EG///dHQ4Del+YIp3zbNdV/sq3Izpt6NSoyFagarvL2OiNtK0 ++kBfVkDze1Dl5mfpKaxPWQKBgQC+gXqxJxKE92VIGyxUqzHqHwTLg9b/ZJuNMU5j +aH/1o8AYfJFtZY7gfeUA4zJckRAQq5rwyilLRgVbXNmvuRHzU4BA2OhvrF+Aag9D +IJXqAYnJ3RXwBtcuFOk3KqKt6mjb4qMpgy4flc5aMDunmtiARo6MvklswtZqHN0A +a/ha8QKBgQCqF/xCf5ORzVkikYYGsO910QXlzsyPdRJbhrBCRTsdhz/paT5GQQXr +y3ToUuKEoHfjFudUeGNOstjchWw+WgT9iqMJhtwV1nU1lkPyjmCQ2ONIP+13dZ+i +I/LDyMngtOKzvD5qpswY1Er+84+RVrtseQjXDC2NlrvDr5LnZDtGag== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/Procfile new file mode 100644 index 000000000000..af291402ca03 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/Procfile @@ -0,0 +1,6 @@ +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure 
--trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca.crt new file mode 100644 index 000000000000..23ee34f4a4d7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUanA77pXfEz2idrPSlIoPrSo6MmcwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5 +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDqtw5G6XZ4N2uuc7TAoiXI+IXA/H+IJIbHrVFQ3LIzLDaS6AmVWw4yT4o2 +X/1IbR5TU6dCnGxuHPutnfnG87is5Oxk1HfIy5cfpf75St3uQycJRcr3Bui/fEZ0 +IZaoRyklcYGI8Y+VfaSADl++EP7UU0X7cc263rZulJXkqp4HihDTPixBgVDruNWf +Yfa2K/Zhiq+zj3hE6s/cBn2pIdY6SMlQ1P0uT/Y5oBTTJFBxeqw+Sz/NXgKgErQg +Za/gNHQWzyRoYHiOGQylvsiXr6tgdk29f0Z6gTQy8FQpwOXYERJr45zh8KvE+FJK +MaWUhGW7hkv85JDZSsmDZ6lVYIfhAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBS+p7B3RLjI8HOOPvVhqtBQNRmH +ZTANBgkqhkiG9w0BAQsFAAOCAQEAFWHLvzzTRQJYjVDxBuXrNZkhFsGAoCYoXhAK +1nXmqLb9/dPMxjkB4ptkQNuP8cMCMPMlapoLkHxEihN1sWZwJRfWShRTK2cQ2kd6 +IKH/M3/ido1PqN/CxhfqvMj3ap3ZkV81nvwn3XhciCGca1CyLzij9RroO0Ee+R3h +mK5A38I1YeRMNOnNAJAW+5scaVtPe6famG2p/OcswobF+ojeZIQJcuk7/FP5iXGA +UfG5WaW3bVfSr5aUGtf/RYZvYu3kWZlAzGaey5iLutRc7f63Ma4jjEEauiGLqQ+6 +F17Feafs2ibRr1wes11O0B/9Ivx9qM/CFgEYhJfp/nBgY/UZXw== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! 
which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/run.sh new file mode 100755 index 000000000000..683a4d28af98 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/run.sh @@ -0,0 +1,33 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs-wildcard/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379 \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs-wildcard/ca.crt \ + --cert=/certs-wildcard/server.crt \ + --key=/certs-wildcard/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + get abc diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server-ca-csr.json new file mode 100644 index 000000000000..616bf11f8f1a --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server-ca-csr.json @@ -0,0 +1,20 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "*.etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.crt new file mode 100644 index 000000000000..a51cd0b94926 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIUQ0AgAKntDzHW4JxYheDkVMow5ykwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5 +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANMqNEozhdLm +K5ATSkgIOyQmBmoUCgiWB+P52YWzfmwaWwQP2FFs3qih2c3DHHH7s2zdceXKT2ZN +lvSO8yj08slLPYSC4LQ3su8njGJlasJ28JMjRqshnH3umxFXf9+aPcZ5yYkoXE9V +fzsnBMJz8hI6K2j4Q6sJe+v/0pdz8MpbdIPnmL9qfVpuD6JqmDCZiQOJ8lpMuqqD 
+60uLjtLv/JKjgdqe5C4psERVm09fg3vOZckv9CC6a4MupeXo2il6femZnPrxC8LX +u2KT3njEjoyzEu2NSdy+BUJDVLgKSh8s2TC8ViNfiFONQo6L1y78ZAyCDrRbTgN9 +Nu1Ou/yzqHkCAwEAAaOBqjCBpzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFC83cRfE +/EKcz7GJKmgDLUBi3kRSMB8GA1UdIwQYMBaAFL6nsHdEuMjwc44+9WGq0FA1GYdl +MCgGA1UdEQQhMB+CDCouZXRjZC5sb2NhbIIJbG9jYWxob3N0hwR/AAABMA0GCSqG +SIb3DQEBCwUAA4IBAQCI7estG86E9IEGREfYul1ej8hltpiAxucmsI0i0lyRHOGa +dss3CKs6TWe5LWXThCIJ2WldI/VgPe63Ezz7WuP3EJxt9QclYArIklS/WN+Bjbn7 +6b8KAtGQkFh7hhjoyilBixpGjECcc7lbriXoEpmUZj9DYQymXWtjKeUJCfQjseNS +V/fmsPph8QveN+pGCypdQ9EA4LGXErg4DQMIo40maYf9/uGBMIrddi930llB0wAh +lsGNUDkrKKJVs2PiVsy8p8sF1h7zAQ+gSqk3ZuWjrTqIIMHtRfIaNICimc7wEy1t +u5fbySMusy1PRAwHVdl5yPxx++KlHyBNowh/9OJh +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.key.insecure new file mode 100644 index 000000000000..ac56ed4ea32d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs-wildcard/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA0yo0SjOF0uYrkBNKSAg7JCYGahQKCJYH4/nZhbN+bBpbBA/Y +UWzeqKHZzcMccfuzbN1x5cpPZk2W9I7zKPTyyUs9hILgtDey7yeMYmVqwnbwkyNG +qyGcfe6bEVd/35o9xnnJiShcT1V/OycEwnPyEjoraPhDqwl76//Sl3Pwylt0g+eY +v2p9Wm4PomqYMJmJA4nyWky6qoPrS4uO0u/8kqOB2p7kLimwRFWbT1+De85lyS/0 +ILprgy6l5ejaKXp96Zmc+vELwte7YpPeeMSOjLMS7Y1J3L4FQkNUuApKHyzZMLxW +I1+IU41CjovXLvxkDIIOtFtOA3027U67/LOoeQIDAQABAoIBAH/sM104NTv8QCu5 +4+gbRGizuHMOzL1C1mjfdU0v3chzduvRBYTeZUzXL/Ec3+CVUK8Ev/krREp/epGQ +//Gx4lrbf9sExkem7nk/Biadtb00/KzGVAtcA0evArXQwiCdegsAwHycvL861ibp +jlKWlvE/2AhxTd0Rk8b2ZYdmr1qGTesIy7S4ilj1B8aYWnZglhSyyU7TqLhYmsWo +3B1ufNpkPCzo97bJmc1/bqXCIQXi/HkkDxJRFa/vESebiy2wdgkWflybW37vLaN0 +mox44uXpVYtZuuGyxdKjX6T2EOglZztXlC8gdxrnFS5leyBEu+7ABS5OvHgnlOX5 +80MyUpkCgYEA/4xpEBltbeJPH52Lla8VrcW3nGWPnfY8xUSnjKBspswTQPu389EO +ayM3DewcpIfxFu/BlMzKz0lm77QQZIu3gIJoEu8IXzUa3fJ2IavRKPSvbNFj5Icl +kVX+mE4BtF+tnAjDWiX9qaNXZcU7b0/q0yXzL35WB4H7Op4axqBir/sCgYEA04m3 +4LtRKWgObQXqNaw+8yEvznWdqVlJngyKoJkSVnqwWRuin9eZDfc84genxxT0rGI9 +/3Fw8enfBVIYGLR5V5aYmGfYyRCkN4aeRc0zDlInm0x2UcZShT8D0LktufwRYZh8 +Ui6+iiIBELwxyyWfuybH5hhstbdFazfu1yNA+xsCgYB47tORYNceVyimh4HU9iRG +NfjsNEvArxSXLiQ0Mn74eD7sU7L72QT/wox9NC1h10tKVW/AoSGg8tWZvha73jqa +wBvMSf4mQBVUzzcEPDEhNpoF3xlsvmAS5SU0okXAPD8GRkdcU/o02y2y5aF4zdMM +1Tq+UQUZTHO9i7CUKrZJHQKBgQC+FueRn0ITv1oXRlVs3dfDi3L2SGLhJ0csK4D3 +SBZed+m4aUj98jOrhRzE0LRIBeDId4/W2A3ylYK/uUHGEYdo2f9OFSONqtKmwuW/ +O+JBYDoPJ+q7GUhWTIYVLhKVKppD5U7yWucGIgBrFXJ5Ztnex76iWhh2Qray3pRV +52whOQKBgHVBI4F7pkn6id9W4sx2LqrVjpjw6vTDepIRK0SXBIQp34WnCL5CERDJ +pks203i42Ww7IadufepkGQOfwuik9wVRNWrNp4oKle6oNK9oK3ihuyb+5DtyKwDm +5sQUYUXc5E3qDQhHCGDzbT7wP+bCDnWKgvV6smshuQSW8M+tFIOQ +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/Procfile b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/Procfile new file mode 100644 index 000000000000..c25bff58b3e6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/Procfile @@ -0,0 +1,6 @@ +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn 
--initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth + +etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth + +etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca-csr.json new file mode 100644 index 000000000000..ecafabaadd3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca.crt new file mode 100644 index 000000000000..4a17292de688 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDsTCCApmgAwIBAgIUCeu/ww6+XbCM3m8m6fp17t8bjOcwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2 +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCgH8KMvldAoQjWA5YQoEOQgRyjZ3hkKdTQcFBj3OR8OyhiNJ+4oEJ/AqyJ +b41G9NGd+88hRSrcCeUBrUY3nWVhqzclCe7mQ1IyordmuKxekmPD/uvzcbySzpJT +qGEwNEiiBcr4mSQiGA5yMgBLKLpKw27t0ncVn/Qt0rKtqwLUYYWGEfADLw7+6iDK +xzCxLeXV/cB1VtFZa62j3KRJR4XQ/QosqwZw2dRGF/jUZgmsRYYK8noOvqY/uRPV +sqwGAKq0B0zOMp185dFrzJVD+LHZgSS9GLGmvRgttwayDuYSOny7WXugQ28fCaRX +p+53s1eBb5cHCGSko48f2329cnlFAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS 
+BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgglhjDWaAJm9ju5x1YMArtH7c +yjANBgkqhkiG9w0BAQsFAAOCAQEAK6IGimbnP9oFSvwNGmXjEtn/vE82dDhQJv8k +oiAsx0JurXBYybvu/MLaBJVQ6bF77hW/fzvhMOzLNEMGY1ql80TmfaTqyPpTN85I +6YhXOViZEQJvH17lVA8d57aSve0WPZqBqS3xI0dGpn/Ji6JPrjKCrgjeukXXHR+L +MScK1lpxaCjD45SMJCzANsMnIKTiKN8RnIcSmnrr/gGl7bC6Y7P84xUGgYu2hvNG +1DZBcelmzbZYk2DtbrR0Ed6IFD1Tz4RAEuKJfInjgAP2da41j4smoecXOsJMGVl5 +5RX7ba3Hohys6la8jSS3opCPKkwEN9mQaB++iN1qoZFY4qB9gg== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencert.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencert.json new file mode 100644 index 000000000000..09b67267bb25 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencerts.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencerts.sh new file mode 100755 index 000000000000..efc098f5398d --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/gencerts.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +rm -f *.csr *.pem *.stderr *.txt diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/run.sh b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/run.sh new file mode 100755 index 000000000000..7f6c31d4fe1c --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/run.sh @@ -0,0 +1,33 @@ +#!/bin/sh +rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data + +/etc/init.d/bind9 start + +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts + +goreman -f /certs/Procfile start & + +# TODO: remove random sleeps +sleep 7s + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379 \ + endpoint health --cluster + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + put abc def + +ETCDCTL_API=3 ./etcdctl \ + --cacert=/certs/ca.crt \ + --cert=/certs/server.crt \ + --key=/certs/server.key.insecure \ + --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ + get abc diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server-ca-csr.json b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server-ca-csr.json new file mode 100644 index 000000000000..77cdb408cf07 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server-ca-csr.json @@ -0,0 +1,22 @@ +{ + "key": { + "algo": "rsa", + "size": 2048 + }, + 
"names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "hosts": [ + "m1.etcd.local", + "m2.etcd.local", + "m3.etcd.local", + "127.0.0.1", + "localhost" + ] +} diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.crt b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.crt new file mode 100644 index 000000000000..928e3cf5db6e --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEKTCCAxGgAwIBAgIUUwtQlOqMccWY8MOaSaWutEjlMrgwDQYJKoZIhvcNAQEL +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2 +MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALyYH7bL79If +75AezzSpkuTOPAGBzPlGFLM5QS4jrt6fJBpElAUV2VmZm+isVsTs2X63md1t4s3Y +44soYK02HONUxUXxbeW7S8yJYSplG5hCJpFiSVP0GyVojQ04OLO1yI5m82fWJNi6 +9PgTmb3+/YD08TKbjjJ4FB0kqoFJE4qoUNNpbkpQxHW4cx9iyWbE9gwyGoC76ftr +DC4J5HavmZ/y51rq1VWrO/d9rmCEUN++M8FcGt6D4WVQ54sWafl4Q1HafBq3FAT5 +swpqi6aDDFKYYTdvjFEmJ2uWacak8NO+vjTt8fTfSFBUYcxweVWIDm6xU8kR8Lwy +aNxD26jQ9GMCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFELi+Ig+ +uxXrOvjoacXjcCjtfHcsMB8GA1UdIwQYMBaAFKCCWGMNZoAmb2O7nHVgwCu0ftzK +MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 +Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAn6e8 +LPd53xQGiicDHN8+WkUS7crr+A+bIfY0nbWUf1H7zwNxpHHnKgVRHc4MKpRY4f+E +M2bEYdNJZDrjFYIWWlFDteVKZevH2dB3weiCAYWPYuiR9dGH6NvVbPcEMwarPBW4 +mLsm9Nl/r7YBxXx73rhfxyBbhTuDwKtY/BAMi+ZO4msnuWiiSiQEUrEmzm9PWhAD +CgNjxCL3xoGyIJGj1xev0PYo+iFrAd9Pkfg2+FaSYXtNPbZX229yHmxU7GbOJumx +5vGQMRtzByq7wqw1dZpITlgbDPJc5jdIRKGnusQ96GXLORSQcP+tfG4NhreYYpI1 +69Y78gNCTl0uGmI21g== +-----END CERTIFICATE----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.key.insecure b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.key.insecure new file mode 100644 index 000000000000..08784a7c65dc --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/certs/server.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvJgftsvv0h/vkB7PNKmS5M48AYHM+UYUszlBLiOu3p8kGkSU +BRXZWZmb6KxWxOzZfreZ3W3izdjjiyhgrTYc41TFRfFt5btLzIlhKmUbmEImkWJJ +U/QbJWiNDTg4s7XIjmbzZ9Yk2Lr0+BOZvf79gPTxMpuOMngUHSSqgUkTiqhQ02lu +SlDEdbhzH2LJZsT2DDIagLvp+2sMLgnkdq+Zn/LnWurVVas7932uYIRQ374zwVwa +3oPhZVDnixZp+XhDUdp8GrcUBPmzCmqLpoMMUphhN2+MUSYna5ZpxqTw076+NO3x +9N9IUFRhzHB5VYgObrFTyRHwvDJo3EPbqND0YwIDAQABAoIBAQC0YCbM9YZ9CRBe +Xik9rAYTknBv3I6Hx5BaziLaF0TUJY8pFHgh2QDVooYsZlBi7kki2kVuNAAdcxhG +ayrz33KHtvcq6zt54sYfbTGik6tt1679k+ygQDOKdtGZWDFbKD0Wfb7FjFoXc9CC +SHTd9DjPkvXxujepa5GJQh1Vo+ftz2I+8e6LeoiBZJM1IosfrpxKg02UnWrLia7o +i8eoXIyMAJHuNUGpGl33WnckyMGDsVKMc2DVG2exfVBZ37lAemYOLRKmd4AwUk2l +ztd71sXQodLk++1BqaS9cc9yvsNiBjGL3Ehm7uUcLH1k3VHd4ArcGhiqffKzQuSE +Dhm8GXNZAoGBAMrXOAdnfLlxYKCqOaj0JwN0RusWBP3cC7jluS5UmeTROPnBa0Fb +219YtiXkDrWtoiwLvvPXobem0/juPkiGnprGcOsPUGa6pV3TPJ40BiIfh9/vt7fr +Bko2SqEA9U0FxredcOFoCPxX9k9EDWxhF/nD20amvRHKK/wv995iXKxHAoGBAO4F +GILNxBHlH5F++dbSSSTcZUTXvuBr7JQkbMK+luSikEtaSW9IO2bf65LtqjaWp4Ds +rENCQAB3PmI111Rjwrk7925W0JCHw/+UArlVoM3K2q1zhYUWAn9L3v+qUTN2TLu1 
+Tso3OkCrQ5aa812tffW3hZHOWJ+aZp2nnBnruDEFAoGAGJDCD1uAJnFNs4eKHiUb +iHaPlC8BgcEhhk4EBFFopeaU0OKU28CFK+HxhVs+UNBrgIwXny5xPm2s5EvuLRho +ovP/fuhG43odRuSrRbmlOIK7EOrWRCbphxlWJnOYQbC+ZURjBFl2JSF+ChGC0qpb +nfsTVlYhNcNXWl5w1XTyJkcCgYEAp07XquJeh0GqTgiWL8XC+nEdkiWhG3lhY8Sy +2rVDtdT7XqxJYDrC3o5Ztf7vnc2KUpqKgACqomkvZbN49+3j63bWdy35Dw8P27A7 +tfEVxnJoAnJokWMmQDqhts8OowDt8SgCCSyG+vwn10518QxJtRXaguIr84yBwyIV +HTdPUs0CgYBIAxoPD9/6R2swClvln15sjaIXDp5rYLbm6mWU8fBURU2fdUw3VBlJ +7YVgQ4GnKiCI7NueBBNRhjXA3KDkFyZw0/oKe2uc/4Gdyx1/L40WbYOaxJD2vIAf +FZ4pK9Yq5Rp3XiCNm0eURBlNM+fwXOQin2XdzDRoEq1B5JalQO87lA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/etcd.zone b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/etcd.zone new file mode 100644 index 000000000000..03c15fe8e667 --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/etcd.zone @@ -0,0 +1,14 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +ns IN A 127.0.0.1 +m1 IN A 127.0.0.1 +m2 IN A 127.0.0.1 +m3 IN A 127.0.0.1 diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/named.conf b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/named.conf new file mode 100644 index 000000000000..83549305c34f --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/named.conf @@ -0,0 +1,23 @@ +options { + directory "/var/bind"; + listen-on { 127.0.0.1; }; + listen-on-v6 { none; }; + allow-transfer { + none; + }; + // If you have problems and are behind a firewall: + query-source address * port 53; + pid-file "/var/run/named/named.pid"; + allow-recursion { none; }; + recursion no; +}; + +zone "etcd.local" IN { + type master; + file "/etc/bind/etcd.zone"; +}; + +zone "0.0.127.in-addr.arpa" { + type master; + file "/etc/bind/rdns.zone"; +}; diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/rdns.zone b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/rdns.zone new file mode 100644 index 000000000000..fb71b30b1fad --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/rdns.zone @@ -0,0 +1,13 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +1 IN PTR m1.etcd.local. +1 IN PTR m2.etcd.local. +1 IN PTR m3.etcd.local. 
diff --git a/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/resolv.conf b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/resolv.conf new file mode 100644 index 000000000000..bbc8559cd54f --- /dev/null +++ b/vendor/github.com/coreos/etcd/hack/scripts-dev/docker-dns/resolv.conf @@ -0,0 +1 @@ +nameserver 127.0.0.1 diff --git a/vendor/github.com/coreos/etcd/integration/bridge.go b/vendor/github.com/coreos/etcd/integration/bridge.go index b9e67318e521..59cebe1f0e03 100644 --- a/vendor/github.com/coreos/etcd/integration/bridge.go +++ b/vendor/github.com/coreos/etcd/integration/bridge.go @@ -17,6 +17,7 @@ package integration import ( "fmt" "io" + "io/ioutil" "net" "sync" @@ -31,9 +32,10 @@ type bridge struct { l net.Listener conns map[*bridgeConn]struct{} - stopc chan struct{} - pausec chan struct{} - wg sync.WaitGroup + stopc chan struct{} + pausec chan struct{} + blackholec chan struct{} + wg sync.WaitGroup mu sync.Mutex } @@ -41,11 +43,12 @@ type bridge struct { func newBridge(addr string) (*bridge, error) { b := &bridge{ // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), + inaddr: addr + "0", + outaddr: addr, + conns: make(map[*bridgeConn]struct{}), + stopc: make(chan struct{}), + pausec: make(chan struct{}), + blackholec: make(chan struct{}), } close(b.pausec) @@ -152,12 +155,12 @@ func (b *bridge) serveConn(bc *bridgeConn) { var wg sync.WaitGroup wg.Add(2) go func() { - io.Copy(bc.out, bc.in) + b.ioCopy(bc, bc.out, bc.in) bc.close() wg.Done() }() go func() { - io.Copy(bc.in, bc.out) + b.ioCopy(bc, bc.in, bc.out) bc.close() wg.Done() }() @@ -179,3 +182,47 @@ func (bc *bridgeConn) close() { bc.in.Close() bc.out.Close() } + +func (b *bridge) Blackhole() { + b.mu.Lock() + close(b.blackholec) + b.mu.Unlock() +} + +func (b *bridge) Unblackhole() { + b.mu.Lock() + for bc := range b.conns { + bc.Close() + } + b.conns = make(map[*bridgeConn]struct{}) + b.blackholec = make(chan struct{}) + b.mu.Unlock() +} + +// ref. 
https://github.com/golang/go/blob/master/src/io/io.go copyBuffer +func (b *bridge) ioCopy(bc *bridgeConn, dst io.Writer, src io.Reader) (err error) { + buf := make([]byte, 32*1024) + for { + select { + case <-b.blackholec: + io.Copy(ioutil.Discard, src) + return nil + default: + } + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if ew != nil { + return ew + } + if nr != nw { + return io.ErrShortWrite + } + } + if er != nil { + err = er + break + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/integration/cluster.go b/vendor/github.com/coreos/etcd/integration/cluster.go index 18c526bbf146..2907e994de27 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster.go +++ b/vendor/github.com/coreos/etcd/integration/cluster.go @@ -31,11 +31,9 @@ import ( "testing" "time" - "golang.org/x/net/context" - "google.golang.org/grpc" - "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" @@ -50,7 +48,11 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/pkg/capnslog" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) const ( @@ -88,12 +90,21 @@ var ( ) type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - DiscoveryURL string - UseGRPC bool - QuotaBackendBytes int64 + Size int + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + DiscoveryURL string + UseGRPC bool + QuotaBackendBytes int64 + MaxRequestBytes uint + GRPCKeepAliveMinTime time.Duration + GRPCKeepAliveInterval time.Duration + GRPCKeepAliveTimeout time.Duration + // SkipCreatingClient to skip creating clients for each member. 
+ SkipCreatingClient bool + + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int } type cluster struct { @@ -221,10 +232,16 @@ func (c *cluster) HTTPMembers() []client.Member { func (c *cluster) mustNewMember(t *testing.T) *member { m := mustNewMember(t, memberConfig{ - name: c.name(rand.Int()), - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, + name: c.name(rand.Int()), + peerTLS: c.cfg.PeerTLS, + clientTLS: c.cfg.ClientTLS, + quotaBackendBytes: c.cfg.QuotaBackendBytes, + maxRequestBytes: c.cfg.MaxRequestBytes, + grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, + grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, + grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, + clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize, + clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize, }) m.DiscoveryURL = c.cfg.DiscoveryURL if c.cfg.UseGRPC { @@ -364,7 +381,7 @@ func (c *cluster) waitLeader(t *testing.T, membs []*member) int { // ensure leader is up via linearizable get for { - ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration) + ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second) _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) cancel() if err == nil || strings.Contains(err.Error(), "Key not found") { @@ -474,23 +491,32 @@ type member struct { s *etcdserver.EtcdServer hss []*httptest.Server - grpcServer *grpc.Server - grpcAddr string - grpcBridge *bridge + grpcServerOpts []grpc.ServerOption + grpcServer *grpc.Server + grpcAddr string + grpcBridge *bridge // serverClient is a clientv3 that directly calls the etcdserver. serverClient *clientv3.Client - keepDataDirTerminate bool + keepDataDirTerminate bool + clientMaxCallSendMsgSize int + clientMaxCallRecvMsgSize int } func (m *member) GRPCAddr() string { return m.grpcAddr } type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - quotaBackendBytes int64 + name string + peerTLS *transport.TLSInfo + clientTLS *transport.TLSInfo + quotaBackendBytes int64 + maxRequestBytes uint + grpcKeepAliveMinTime time.Duration + grpcKeepAliveInterval time.Duration + grpcKeepAliveTimeout time.Duration + clientMaxCallSendMsgSize int + clientMaxCallRecvMsgSize int } // mustNewMember return an inited member with the given name. 
If peerTLS is @@ -538,7 +564,30 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member { m.ElectionTicks = electionTicks m.TickMs = uint(tickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.quotaBackendBytes + m.MaxRequestBytes = mcfg.maxRequestBytes + if m.MaxRequestBytes == 0 { + m.MaxRequestBytes = embed.DefaultMaxRequestBytes + } m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough + + m.grpcServerOpts = []grpc.ServerOption{} + if mcfg.grpcKeepAliveMinTime > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.grpcKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if mcfg.grpcKeepAliveInterval > time.Duration(0) && + mcfg.grpcKeepAliveTimeout > time.Duration(0) { + m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.grpcKeepAliveInterval, + Timeout: mcfg.grpcKeepAliveTimeout, + })) + } + + m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize + m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize + return m } @@ -567,6 +616,8 @@ func (m *member) electionTimeout() time.Duration { func (m *member) DropConnections() { m.grpcBridge.Reset() } func (m *member) PauseConnections() { m.grpcBridge.Pause() } func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } +func (m *member) Blackhole() { m.grpcBridge.Blackhole() } +func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *member) (*clientv3.Client, error) { @@ -575,8 +626,10 @@ func NewClientV3(m *member) (*clientv3.Client, error) { } cfg := clientv3.Config{ - Endpoints: []string{m.grpcAddr}, - DialTimeout: 5 * time.Second, + Endpoints: []string{m.grpcAddr}, + DialTimeout: 5 * time.Second, + MaxCallSendMsgSize: m.clientMaxCallSendMsgSize, + MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize, } if m.ClientTLSInfo != nil { @@ -676,7 +729,7 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg) + m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...) m.serverClient = v3client.New(m.s) lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) @@ -824,7 +877,7 @@ func (m *member) Metric(metricName string) (string, error) { } // InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t *testing.T, others []*member) { +func (m *member) InjectPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.CutPeer(other.s.ID()) other.s.CutPeer(m.s.ID()) @@ -832,7 +885,7 @@ func (m *member) InjectPartition(t *testing.T, others []*member) { } // RecoverPartition recovers connections from m to others, vice versa. 
-func (m *member) RecoverPartition(t *testing.T, others []*member) { +func (m *member) RecoverPartition(t *testing.T, others ...*member) { for _, other := range others { m.s.MendPeer(other.s.ID()) other.s.MendPeer(m.s.ID()) @@ -884,12 +937,15 @@ func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 { cluster: NewClusterByConfig(t, cfg), } clus.Launch(t) - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) + + if !cfg.SkipCreatingClient { + for _, m := range clus.Members { + client, err := NewClientV3(m) + if err != nil { + t.Fatalf("cannot create client: %v", err) + } + clus.clients = append(clus.clients, client) } - clus.clients = append(clus.clients, client) } return clus diff --git a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go index 3916553be867..15094358e7b1 100644 --- a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go +++ b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go @@ -99,12 +99,12 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { return nil, err } rpc := toGRPC(c) - c.KV = clientv3.NewKVFromKVClient(rpc.KV) + c.KV = clientv3.NewKVFromKVClient(rpc.KV, c) pmu.Lock() lc := c.Lease - c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, cfg.DialTimeout) + c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout) c.Watcher = &proxyCloser{ - Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), + Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch, c), wdonec: proxies[c].wdonec, kvdonec: proxies[c].kvdonec, lclose: func() { lc.Close() }, diff --git a/vendor/github.com/coreos/etcd/integration/embed_test.go b/vendor/github.com/coreos/etcd/integration/embed_test.go index 751494eaea0f..03a32887f446 100644 --- a/vendor/github.com/coreos/etcd/integration/embed_test.go +++ b/vendor/github.com/coreos/etcd/integration/embed_test.go @@ -47,7 +47,7 @@ func TestEmbedEtcd(t *testing.T) { {werr: "expected IP"}, } - urls := newEmbedURLs(10) + urls := newEmbedURLs(false, 10) // setup defaults for i := range tests { @@ -105,12 +105,19 @@ func TestEmbedEtcd(t *testing.T) { } } -// TestEmbedEtcdGracefulStop ensures embedded server stops +func TestEmbedEtcdGracefulStopSecure(t *testing.T) { testEmbedEtcdGracefulStop(t, true) } +func TestEmbedEtcdGracefulStopInsecure(t *testing.T) { testEmbedEtcdGracefulStop(t, false) } + +// testEmbedEtcdGracefulStop ensures embedded server stops // cutting existing transports. 
-func TestEmbedEtcdGracefulStop(t *testing.T) { +func testEmbedEtcdGracefulStop(t *testing.T, secure bool) { cfg := embed.NewConfig() + if secure { + cfg.ClientTLSInfo = testTLSInfo + cfg.PeerTLSInfo = testTLSInfo + } - urls := newEmbedURLs(2) + urls := newEmbedURLs(secure, 2) setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]}) cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprintf("embed-etcd")) @@ -123,7 +130,16 @@ func TestEmbedEtcdGracefulStop(t *testing.T) { } <-e.Server.ReadyNotify() // wait for e.Server to join the cluster - cli, err := clientv3.New(clientv3.Config{Endpoints: []string{urls[0].String()}}) + clientCfg := clientv3.Config{ + Endpoints: []string{urls[0].String()}, + } + if secure { + clientCfg.TLS, err = testTLSInfo.ClientConfig() + if err != nil { + t.Fatal(err) + } + } + cli, err := clientv3.New(clientCfg) if err != nil { t.Fatal(err) } @@ -146,9 +162,13 @@ func TestEmbedEtcdGracefulStop(t *testing.T) { } } -func newEmbedURLs(n int) (urls []url.URL) { +func newEmbedURLs(secure bool, n int) (urls []url.URL) { + scheme := "unix" + if secure { + scheme = "unixs" + } for i := 0; i < n; i++ { - u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d%06d", os.Getpid(), i)) + u, _ := url.Parse(fmt.Sprintf("%s://localhost:%d%06d", scheme, os.Getpid(), i)) urls = append(urls, *u) } return diff --git a/vendor/github.com/coreos/etcd/integration/network_partition_test.go b/vendor/github.com/coreos/etcd/integration/network_partition_test.go index 21130eb02652..b94d6bc4c9b9 100644 --- a/vendor/github.com/coreos/etcd/integration/network_partition_test.go +++ b/vendor/github.com/coreos/etcd/integration/network_partition_test.go @@ -149,12 +149,12 @@ func getMembersByIndexSlice(clus *cluster, idxs []int) []*member { func injectPartition(t *testing.T, src, others []*member) { for _, m := range src { - m.InjectPartition(t, others) + m.InjectPartition(t, others...) } } func recoverPartition(t *testing.T, src, others []*member) { for _, m := range src { - m.RecoverPartition(t, others) + m.RecoverPartition(t, others...) } } diff --git a/vendor/github.com/coreos/etcd/integration/v3_auth_test.go b/vendor/github.com/coreos/etcd/integration/v3_auth_test.go index 708e34aa99d3..83e45d523095 100644 --- a/vendor/github.com/coreos/etcd/integration/v3_auth_test.go +++ b/vendor/github.com/coreos/etcd/integration/v3_auth_test.go @@ -158,3 +158,55 @@ func TestV3AuthOldRevConcurrent(t *testing.T) { } wg.Wait() } + +// TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases +// with root user be revoked after TTL. 
+func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) { + defer testutil.AfterTest(t) + + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + api := toGRPC(clus.Client(0)) + authSetupRoot(t, api.Auth) + + rootc, cerr := clientv3.New(clientv3.Config{ + Endpoints: clus.Client(0).Endpoints(), + Username: "root", + Password: "123", + }) + if cerr != nil { + t.Fatal(cerr) + } + defer rootc.Close() + + leaseResp, err := rootc.Grant(context.TODO(), 2) + if err != nil { + t.Fatal(err) + } + leaseID := leaseResp.ID + + if _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)); err != nil { + t.Fatal(err) + } + + // wait for lease expire + time.Sleep(3 * time.Second) + + tresp, terr := api.Lease.LeaseTimeToLive( + context.TODO(), + &pb.LeaseTimeToLiveRequest{ + ID: int64(leaseID), + Keys: true, + }, + ) + if terr != nil { + t.Error(terr) + } + if len(tresp.Keys) > 0 || tresp.GrantedTTL != 0 { + t.Errorf("lease %016x should have been revoked, got %+v", leaseID, tresp) + } + if tresp.TTL != -1 { + t.Errorf("lease %016x should have been expired, got %+v", leaseID, tresp) + } +} diff --git a/vendor/github.com/coreos/etcd/integration/v3_grpc_test.go b/vendor/github.com/coreos/etcd/integration/v3_grpc_test.go index 30de93cd214c..81cc2f1cce22 100644 --- a/vendor/github.com/coreos/etcd/integration/v3_grpc_test.go +++ b/vendor/github.com/coreos/etcd/integration/v3_grpc_test.go @@ -1372,7 +1372,7 @@ func TestTLSGRPCRejectInsecureClient(t *testing.T) { // nil out TLS field so client will use an insecure connection clus.Members[0].ClientTLSInfo = nil client, err := NewClientV3(clus.Members[0]) - if err != nil && err != grpc.ErrClientConnTimeout { + if err != nil && err != context.DeadlineExceeded { t.Fatalf("unexpected error (%v)", err) } else if client == nil { // Ideally, no client would be returned. However, grpc will @@ -1408,7 +1408,7 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) { client, err := NewClientV3(clus.Members[0]) if client != nil || err == nil { t.Fatalf("expected no client") - } else if err != grpc.ErrClientConnTimeout { + } else if err != context.DeadlineExceeded { t.Fatalf("unexpected error (%v)", err) } } @@ -1565,8 +1565,8 @@ func testTLSReload(t *testing.T, cloneFunc func() transport.TLSInfo, replaceFunc // 5. 
expect dial time-out when loading expired certs select { case gerr := <-errc: - if gerr != grpc.ErrClientConnTimeout { - t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, gerr) + if gerr != context.DeadlineExceeded { + t.Fatalf("expected %v, got %v", context.DeadlineExceeded, gerr) } case <-time.After(5 * time.Second): t.Fatal("failed to receive dial timeout error") @@ -1611,7 +1611,7 @@ func TestGRPCRequireLeader(t *testing.T) { time.Sleep(time.Duration(3*electionTicks) * tickDuration) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewContext(context.Background(), md) + ctx := metadata.NewOutgoingContext(context.Background(), md) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) @@ -1633,7 +1633,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) { wAPI := toGRPC(client).Watch md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewContext(context.Background(), md) + ctx := metadata.NewOutgoingContext(context.Background(), md) wStream, err := wAPI.Watch(ctx) if err != nil { t.Fatalf("wAPI.Watch error: %v", err) @@ -1680,6 +1680,43 @@ func TestGRPCStreamRequireLeader(t *testing.T) { } } +// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended. +func TestV3LargeRequests(t *testing.T) { + defer testutil.AfterTest(t) + tests := []struct { + maxRequestBytes uint + valueSize int + expectError error + }{ + // don't set to 0. use 0 as the default. + {1, 1024, rpctypes.ErrGRPCRequestTooLarge}, + {10 * 1024 * 1024, 9 * 1024 * 1024, nil}, + {10 * 1024 * 1024, 10 * 1024 * 1024, rpctypes.ErrGRPCRequestTooLarge}, + {10 * 1024 * 1024, 10*1024*1024 + 5, rpctypes.ErrGRPCRequestTooLarge}, + } + for i, test := range tests { + clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) + kvcli := toGRPC(clus.Client(0)).KV + reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)} + _, err := kvcli.Put(context.TODO(), reqput) + if !eqErrGRPC(err, test.expectError) { + t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err) + } + + // request went through, expect large response back from server + if test.expectError == nil { + reqget := &pb.RangeRequest{Key: []byte("foo")} + // limit receive call size with original value + gRPC overhead bytes + _, err = kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024)) + if err != nil { + t.Errorf("#%d: range expected no error, got %v", i, err) + } + } + + clus.Terminate(t) + } +} + func eqErrGRPC(err1 error, err2 error) bool { return !(err1 == nil && err2 != nil) || err1.Error() == err2.Error() } diff --git a/vendor/github.com/coreos/etcd/integration/v3_lease_test.go b/vendor/github.com/coreos/etcd/integration/v3_lease_test.go index dbb8e6b727ab..7bb72ba131f8 100644 --- a/vendor/github.com/coreos/etcd/integration/v3_lease_test.go +++ b/vendor/github.com/coreos/etcd/integration/v3_lease_test.go @@ -460,7 +460,7 @@ func TestV3LeaseFailover(t *testing.T) { lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID} md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewContext(context.Background(), md) + mctx := metadata.NewOutgoingContext(context.Background(), md) ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err 
:= lc.LeaseKeepAlive(ctx) @@ -508,7 +508,7 @@ func TestV3LeaseRequireLeader(t *testing.T) { clus.Members[2].Stop(t) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewContext(context.Background(), md) + mctx := metadata.NewOutgoingContext(context.Background(), md) ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go index 62a831adbd2b..9d053337f78a 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" ) @@ -370,10 +370,10 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { } tmpb, berr := tmptx.CreateBucketIfNotExists(next) - tmpb.FillPercent = 0.9 // for seq write in for each if berr != nil { return berr } + tmpb.FillPercent = 0.9 // for seq write in for each b.ForEach(func(k, v []byte) error { count++ diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend_test.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend_test.go index af898b5ad3af..664579bbaba1 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend_test.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func TestBackendClose(t *testing.T) { diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go index a47f67d49b8f..e5fb84740899 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) type BatchTx interface { diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go index 582cbf84e7ac..57549b98b990 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go @@ -19,7 +19,7 @@ import ( "testing" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func TestBatchTxPut(t *testing.T) { diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go index 9b9a7ee3a340..edfed0025c6c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -16,7 +16,7 @@ package backend -import "github.com/boltdb/bolt" +import bolt "github.com/coreos/bbolt" var boltOpenOptions *bolt.Options = nil diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go index 416158c61002..a8f6abeba63e 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go index 
fe30a220490a..71d02700bcdc 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -16,7 +16,7 @@ package backend -import "github.com/boltdb/bolt" +import bolt "github.com/coreos/bbolt" var boltOpenOptions *bolt.Options = nil diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go index 51596ffdf27b..9fc6b7906206 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -19,7 +19,7 @@ import ( "math" "sync" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go index 68d9ab71d271..5b00bcba9974 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -188,7 +188,7 @@ func (s *watchableStore) Restore(b backend.Backend) error { } for wa := range s.synced.watchers { - s.unsynced.watchers.add(wa) + s.unsynced.add(wa) } s.synced = newWatcherGroup() return nil diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_test.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_test.go index 60fe949a3be0..141627c8be6a 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_test.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_test.go @@ -295,36 +295,45 @@ func TestWatchFutureRev(t *testing.T) { } func TestWatchRestore(t *testing.T) { - b, tmpPath := backend.NewDefaultTmpBackend() - s := newWatchableStore(b, &lease.FakeLessor{}, nil) - defer cleanup(s, b, tmpPath) - - testKey := []byte("foo") - testValue := []byte("bar") - rev := s.Put(testKey, testValue, lease.NoLease) - - newBackend, newPath := backend.NewDefaultTmpBackend() - newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil) - defer cleanup(newStore, newBackend, newPath) - - w := newStore.NewWatchStream() - w.Watch(testKey, nil, rev-1) - - newStore.Restore(b) - select { - case resp := <-w.Chan(): - if resp.Revision != rev { - t.Fatalf("rev = %d, want %d", resp.Revision, rev) - } - if len(resp.Events) != 1 { - t.Fatalf("failed to get events from the response") - } - if resp.Events[0].Kv.ModRevision != rev { - t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev) + test := func(delay time.Duration) func(t *testing.T) { + return func(t *testing.T) { + b, tmpPath := backend.NewDefaultTmpBackend() + s := newWatchableStore(b, &lease.FakeLessor{}, nil) + defer cleanup(s, b, tmpPath) + + testKey := []byte("foo") + testValue := []byte("bar") + rev := s.Put(testKey, testValue, lease.NoLease) + + newBackend, newPath := backend.NewDefaultTmpBackend() + newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil) + defer cleanup(newStore, newBackend, newPath) + + w := newStore.NewWatchStream() + w.Watch(testKey, nil, rev-1) + + time.Sleep(delay) + + newStore.Restore(b) + select { + case resp := <-w.Chan(): + if resp.Revision != rev { + t.Fatalf("rev = %d, want %d", resp.Revision, rev) + } + if len(resp.Events) != 1 { + t.Fatalf("failed to get events from the response") + } + if resp.Events[0].Kv.ModRevision != rev { + t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev) + } + case <-time.After(time.Second): + t.Fatal("failed to receive event in 1 second.") + } } - case 
<-time.After(time.Second): - t.Fatal("failed to receive event in 1 second.") } + + t.Run("Normal", test(0)) + t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration } // TestWatchBatchUnsynced tests batching on unsynced watchers diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster_test.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster_test.go index 031a95684db3..e36234c195cb 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster_test.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster_test.go @@ -107,15 +107,20 @@ func newClusterProxyServer(endpoints []string, t *testing.T) *clusterproxyTestSe } var opts []grpc.ServerOption cts.server = grpc.NewServer(opts...) - go cts.server.Serve(cts.l) - - // wait some time for free port 0 to be resolved - time.Sleep(500 * time.Millisecond) + servec := make(chan struct{}) + go func() { + <-servec + cts.server.Serve(cts.l) + }() Register(client, "test-prefix", cts.l.Addr().String(), 7) cts.cp, cts.donec = NewClusterProxy(client, cts.l.Addr().String(), "test-prefix") cts.caddr = cts.l.Addr().String() pb.RegisterClusterServer(cts.server, cts.cp) + close(servec) + + // wait some time for free port 0 to be resolved + time.Sleep(500 * time.Millisecond) return cts } diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go index dd23425a2812..19c2249a7e2a 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go @@ -137,7 +137,7 @@ func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error errc := make(chan error, 2) var lostLeaderC <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { lostLeaderC = lp.leader.lostNotify() diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go index 42748fd4a4fa..b960c94769ac 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go @@ -95,7 +95,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { } var lostLeaderC <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { lostLeaderC = wp.leader.lostNotify() diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 3c45eef003c2..4c6e79d58a0c 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if 
iNdEx >= l { + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } - m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) diff --git a/vendor/github.com/coreos/etcd/scripts/build-docker b/vendor/github.com/coreos/etcd/scripts/build-docker index b7aea2bc248a..40a4dd3a9697 100755 --- a/vendor/github.com/coreos/etcd/scripts/build-docker +++ b/vendor/github.com/coreos/etcd/scripts/build-docker @@ -10,21 +10,20 @@ fi VERSION=${1} ARCH=$(go env GOARCH) DOCKERFILE="Dockerfile-release" -: ${TAG:="quay.io/coreos/etcd"} -if [ -z ${BINARYDIR} ]; then - RELEASE="etcd-${1}"-`go env GOOS`-`go env GOARCH` +if [ -z "${BINARYDIR}" ]; then + RELEASE="etcd-${1}"-$(go env GOOS)-$(go env GOARCH) BINARYDIR="${RELEASE}" TARFILE="${RELEASE}.tar.gz" TARURL="https://github.com/coreos/etcd/releases/download/${1}/${TARFILE}" - if ! curl -f -L -o ${TARFILE} ${TARURL} ; then + if ! curl -f -L -o "${TARFILE}" "${TARURL}" ; then echo "Failed to download ${TARURL}." exit 1 fi - tar -zvxf ${TARFILE} + tar -zvxf "${TARFILE}" fi -if [ ${ARCH} != "amd64" ]; then +if [ "${ARCH}" != "amd64" ]; then DOCKERFILE+=".${ARCH}" VERSION+="-${ARCH}" fi @@ -34,10 +33,15 @@ BUILDDIR=${BUILDDIR:-.} IMAGEDIR=${BUILDDIR}/image-docker -mkdir -p ${IMAGEDIR}/var/etcd -mkdir -p ${IMAGEDIR}/var/lib/etcd -cp ${BINARYDIR}/etcd ${BINARYDIR}/etcdctl ${IMAGEDIR} +mkdir -p "${IMAGEDIR}"/var/etcd +mkdir -p "${IMAGEDIR}"/var/lib/etcd +cp "${BINARYDIR}"/etcd "${BINARYDIR}"/etcdctl "${IMAGEDIR}" -cat ./${DOCKERFILE} > ${IMAGEDIR}/Dockerfile +cat ./"${DOCKERFILE}" > "${IMAGEDIR}"/Dockerfile -docker build -t ${TAG}:${VERSION} ${IMAGEDIR} +if [ -z "$TAG" ]; then + docker build -t "gcr.io/etcd-development/etcd:${VERSION}" "${IMAGEDIR}" + docker build -t "quay.io/coreos/etcd:${VERSION}" "${IMAGEDIR}" +else + docker build -t "${TAG}:${VERSION}" "${IMAGEDIR}" +fi diff --git a/vendor/github.com/coreos/etcd/scripts/genproto.sh b/vendor/github.com/coreos/etcd/scripts/genproto.sh index 1b46073459d3..ae0058875a65 100755 --- a/vendor/github.com/coreos/etcd/scripts/genproto.sh +++ b/vendor/github.com/coreos/etcd/scripts/genproto.sh @@ -11,8 +11,8 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then fi # for now, be conservative about what version of protoc we expect -if ! [[ $(protoc --version) =~ "3.2.0" ]]; then - echo "could not find protoc 3.2.0, is it installed + in PATH?" +if ! [[ $(protoc --version) =~ "3.4.0" ]]; then + echo "could not find protoc 3.4.0, is it installed + in PATH?" 
exit 255 fi @@ -20,8 +20,8 @@ fi DIRS="./wal/walpb ./etcdserver/etcdserverpb ./snap/snappb ./raft/raftpb ./mvcc/mvccpb ./lease/leasepb ./auth/authpb ./etcdserver/api/v3lock/v3lockpb ./etcdserver/api/v3election/v3electionpb" # exact version of protoc-gen-gogo to build -GOGO_PROTO_SHA="8d70fb3182befc465c4a1eac8ad4d38ff49778e2" -GRPC_GATEWAY_SHA="84398b94e188ee336f307779b57b3aa91af7063c" +GOGO_PROTO_SHA="100ba4e885062801d56799d78530b73b178a78f3" +GRPC_GATEWAY_SHA="8cc3a55af3bcf171a1c23a90c4df9cf591706104" # set up self-contained GOPATH for building export GOPATH=${PWD}/gopath.proto diff --git a/vendor/github.com/coreos/etcd/test b/vendor/github.com/coreos/etcd/test index 96ab3233abf1..08be1b2ed603 100755 --- a/vendor/github.com/coreos/etcd/test +++ b/vendor/github.com/coreos/etcd/test @@ -38,13 +38,13 @@ IGNORE_PKGS="(cmd/|etcdserverpb|rafttest|gopath.proto|v3lockpb|v3electionpb)" INTEGRATION_PKGS="(integration|e2e|contrib|functional-tester)" # all github.com/coreos/etcd/whatever pkgs that are not auto-generated / tools -PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo` +PKGS=$(find . -name \*.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | grep -vE "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo) # pkg1,pkg2,pkg3 PKGS_COMMA=${PKGS// /,} -TEST_PKGS=`find . -name \*_test.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"` -FORMATTABLE=`find . -name \*.go | while read a; do echo "$(dirname $a)/*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"` -TESTABLE_AND_FORMATTABLE=`echo "$TEST_PKGS" | egrep -v "$INTEGRATION_PKGS"` +TEST_PKGS=$(find . -name \*_test.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | sed "s|\./||g") +FORMATTABLE=$(find . -name \*.go | while read -r a; do echo "$(dirname "$a")/*.go"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | sed "s|\./||g") +TESTABLE_AND_FORMATTABLE=$(echo "$TEST_PKGS" | grep -vE "$INTEGRATION_PKGS") # check if user provided PKG override if [ -z "${USERPKG}" ]; then @@ -58,20 +58,23 @@ else # only run gofmt on packages provided by user FMT="$TEST" fi +FMT=($FMT) -# split TEST into an array and prepend REPO_PATH to each local package -split=(${TEST// / }) -TEST=${split/#/${REPO_PATH}/} +# prepend REPO_PATH to each local package +split=$TEST +TEST="" +for a in $split; do TEST="$TEST ${REPO_PATH}/${a}"; done +TEST=($TEST) # TODO: 'client' pkg fails with gosimple from generated files # TODO: 'rafttest' is failing with unused -STATIC_ANALYSIS_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'` +STATIC_ANALYSIS_PATHS=$(find . -name \*.go | while read -r a; do dirname "$a"; done | sort | uniq | grep -vE "$IGNORE_PKGS" | grep -v 'client') +STATIC_ANALYSIS_PATHS=($STATIC_ANALYSIS_PATHS) if [ -z "$GOARCH" ]; then GOARCH=$(go env GOARCH); fi - # determine whether target supports race detection if [ "$GOARCH" == "amd64" ]; then RACE="--race" @@ -79,8 +82,16 @@ fi function unit_pass { echo "Running unit tests..." 
+ GO_TEST_FLAG="" + if [ "${VERBOSE}" == "1" ]; then + GO_TEST_FLAG="-v" + fi + if [ "${VERBOSE}" == "2" ]; then + GO_TEST_FLAG="-v" + export CLIENT_DEBUG=1 + fi # only -run=Test so examples can run in integration tests - go test -timeout 3m ${COVER} ${RACE} -cpu 1,2,4 -run=Test $@ ${TEST} + go test ${GO_TEST_FLAG} -timeout 5m "${COVER}" ${RACE} -cpu 1,2,4 -run=Test "$@" "${TEST[@]}" } function integration_pass { @@ -93,6 +104,8 @@ function integration_pass { } function functional_pass { + # Clean up any data and logs from previous runs + rm -rf ./agent-* for a in 1 2 3; do mkdir -p ./agent-$a ./bin/etcd-agent -etcd-path ./bin/etcd -etcd-log-dir "./agent-$a" -port ":${a}9027" -use-root=false & @@ -121,87 +134,33 @@ function functional_pass { echo "Waiting for processes to exit" kill -s TERM ${agent_pids} for a in ${agent_pids}; do wait $a || true; done - rm -rf ./agent-* if [[ "${ETCD_TESTER_EXIT_CODE}" -ne "0" ]]; then - echo "FAIL with exit code" ${ETCD_TESTER_EXIT_CODE} + echo "--- FAIL: exit code" ${ETCD_TESTER_EXIT_CODE} exit ${ETCD_TESTER_EXIT_CODE} fi } -function cov_pass { - echo "Running code coverage..." - # install gocovmerge before running code coverage from github.com/wadey/gocovmerge - # gocovmerge merges coverage files - if ! which gocovmerge >/dev/null; then - echo "gocovmerge not installed" - exit 255 - fi - - if [ -z "$COVERDIR" ]; then - echo "COVERDIR undeclared" - exit 255 - fi - - if [ ! -f "bin/etcd_test" ]; then - echo "etcd_test binary not found" - exit 255 - fi - - mkdir -p "$COVERDIR" - - # run code coverage for unit and integration tests - GOCOVFLAGS="-covermode=set -coverpkg $PKGS_COMMA -v -timeout 15m" - failed="" - for t in `echo "${TEST_PKGS}" | egrep -v "(e2e|functional-tester)"`; do - tf=`echo $t | tr / _` - # cache package compilation data for faster repeated builds - go test $GOCOVFLAGS -i ${REPO_PATH}/$t || true - # uses -run=Test to skip examples because clientv3/ example tests will leak goroutines - go test $GOCOVFLAGS -run=Test -coverprofile "$COVERDIR/${tf}.coverprofile" ${REPO_PATH}/$t || failed="$failed $t" - done - - # proxy tests - go test -tags cluster_proxy $GOCOVFLAGS -coverprofile "$COVERDIR/proxy_integration.coverprofile" ${REPO_PATH}/integration || failed="$failed proxy-integration" - go test -tags cluster_proxy $GOCOVFLAGS -coverprofile "$COVERDIR/proxy_clientv3.coverprofile" ${REPO_PATH}/clientv3/integration || failed="$failed proxy-clientv3/integration" - - # run code coverage for e2e tests - # use 30m timeout because e2e coverage takes longer - # due to many tests cause etcd process to wait - # on leadership transfer timeout during gracefully shutdown - go test -tags cov -timeout 30m -v ${REPO_PATH}"/e2e" || failed="$failed e2e" - - gocovmerge "$COVERDIR"/*.coverprofile >"$COVERDIR"/cover.out - # strip out generated files (using GNU-style sed) - sed --in-place '/generated.go/d' "$COVERDIR"/cover.out || true - - # held failures to generate the full coverage file, now fail - if [ -n "$failed" ]; then - for f in $failed; do - echo FAIL $f - done - exit 255 - fi -} - function e2e_pass { echo "Running e2e tests..." go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e } +function integration_extra { + go test -timeout 15m -v ${RACE} -cpu 1,2,4 "$@" "${REPO_PATH}/client/integration" + go test -timeout 20m -v ${RACE} -cpu 1,2,4 "$@" "${REPO_PATH}/clientv3/integration" +} + function integration_e2e_pass { echo "Running integration and e2e tests..." 
- go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e & + go test -timeout 15m -v -cpu 1,2,4 "$@" "${REPO_PATH}/e2e" & e2epid="$!" - go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/integration & + go test -timeout 15m -v -cpu 1,2,4 "$@" "${REPO_PATH}/integration" & intpid="$!" wait $e2epid wait $intpid - go test -timeout 1m -v ${RACE} -cpu 1,2,4 $@ ${REPO_PATH}/client/integration - go test -timeout 10m -v ${RACE} -cpu 1,2,4 $@ ${REPO_PATH}/clientv3/integration - go test -timeout 1m -v -cpu 1,2,4 $@ ${REPO_PATH}/contrib/raftexample - go test -timeout 1m -v ${RACE} -cpu 1,2,4 -run=Example $@ ${TEST} + integration_extra "$@" } function grpcproxy_pass { @@ -231,7 +190,7 @@ function release_pass { set -e case $result in 0) ;; - *) echo "FAIL with" ${result} + *) echo "--- FAIL:" ${result} exit $result ;; esac @@ -258,30 +217,6 @@ function fmt_pass { exit 255 fi - echo "Checking 'go tool vet -all -shadow'..." - fmtpkgs=$(echo $FMT | xargs dirname | sort | uniq | sed '/\./d') - vetRes=$(go tool vet -all -shadow ${fmtpkgs} 2>&1 | grep -v '/gw/' || true) - if [ -n "${vetRes}" ]; then - echo -e "govet -all -shadow checking failed:\n${vetRes}" - exit 255 - fi - - if which shellcheck >/dev/null; then - echo "Checking shellcheck..." - shellcheckResult=$(shellcheck -fgcc build test scripts/* 2>&1 || true) - if [ -n "${shellcheckResult}" ]; then - # mask the most common ones; fix later - SHELLCHECK_MASK="SC(2086|2006|2068|2196|2035|2162|2076)" - errs=$(echo "${shellcheckResult}" | egrep -v "${SHELLCHECK_MASK}" || true) - if [ -n "${errs}" ]; then - echo -e "shellcheck checking failed:\n${shellcheckResult}\n===\nFailed:\n${errs}" - exit 255 - fi - suppressed=$(echo "${shellcheckResult}" | cut -f4- -d':' | sort | uniq -c | sort -n) - echo -e "shellcheck suppressed warnings:\n${suppressed}" - fi - fi - echo "Checking documentation style..." # eschew you yous=`find . -name \*.md -exec egrep --color "[Yy]ou[r]?[ '.,;]" {} + | grep -v /v2/ || true` @@ -302,22 +237,6 @@ function fmt_pass { echo "Skipping marker..." fi - if which goword >/dev/null; then - echo "Checking goword..." - # get all go files to process - gofiles=`find $FMT -iname '*.go' 2>/dev/null` - # ignore tests and protobuf files - gofiles=`echo ${gofiles} | sort | uniq | sed "s/ /\n/g" | egrep -v "(\\_test.go|\\.pb\\.go)"` - # only check for broken exported godocs - gowordRes=`goword -use-spell=false ${gofiles} | grep godoc-export | sort` - if [ ! -z "$gowordRes" ]; then - echo -e "goword checking failed:\n${gowordRes}" - exit 255 - fi - else - echo "Skipping goword..." - fi - if which gosimple >/dev/null; then echo "Checking gosimple..." 
gosimpleResult=`gosimple ${STATIC_ANALYSIS_PATHS} 2>&1 || true` diff --git a/vendor/github.com/coreos/etcd/tools/benchmark/cmd/util.go b/vendor/github.com/coreos/etcd/tools/benchmark/cmd/util.go index a5872913dfc1..4af0b7acd8dd 100644 --- a/vendor/github.com/coreos/etcd/tools/benchmark/cmd/util.go +++ b/vendor/github.com/coreos/etcd/tools/benchmark/cmd/util.go @@ -17,13 +17,14 @@ package cmd import ( "crypto/rand" "fmt" - "log" "os" "strings" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/report" + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" ) var ( @@ -98,7 +99,7 @@ func mustCreateConn() *clientv3.Client { return mustCreateConn() } - clientv3.SetLogger(log.New(os.Stderr, "grpc", 0)) + clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) if err != nil { fmt.Fprintf(os.Stderr, "dial error: %v\n", err) diff --git a/vendor/github.com/coreos/etcd/tools/etcd-dump-db/backend.go b/vendor/github.com/coreos/etcd/tools/etcd-dump-db/backend.go index 5a509586f3e1..618d8114934c 100644 --- a/vendor/github.com/coreos/etcd/tools/etcd-dump-db/backend.go +++ b/vendor/github.com/coreos/etcd/tools/etcd-dump-db/backend.go @@ -18,7 +18,7 @@ import ( "fmt" "path/filepath" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" ) diff --git a/vendor/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go b/vendor/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go index 30e8d47d703b..f9ab3f9fbc65 100644 --- a/vendor/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go +++ b/vendor/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go @@ -21,11 +21,8 @@ import ( "time" "golang.org/x/time/rate" - "google.golang.org/grpc/grpclog" ) -func init() { grpclog.SetLogger(plog) } - type Stresser interface { // Stress starts to stress the etcd cluster Stress() error diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index 2da277bd5ca4..cd25875f4ec3 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.2.8" + Version = "3.2.16" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/.travis.yml b/vendor/github.com/grpc-ecosystem/grpc-gateway/.travis.yml index 61ec5207f5d6..9ca6f5d7020d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/.travis.yml +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/.travis.yml @@ -1,8 +1,10 @@ language: go sudo: false go: -- 1.7.3 -- tip +- 1.7.x +- 1.8.x +- 1.9.x +- master go_import_path: github.com/grpc-ecosystem/grpc-gateway cache: directories: @@ -10,10 +12,11 @@ cache: - ${TRAVIS_BUILD_DIR}/examples/browser/node_modules before_install: - ./.travis/install-protoc.sh 3.1.0 -- ./.travis/install-swagger-codegen.sh 2.1.6 +- ./.travis/install-swagger-codegen.sh 2.2.2 - nvm install v6.1 && nvm use v6.1 && node --version - go get github.com/golang/lint/golint - go get github.com/dghubble/sling +- go get github.com/go-resty/resty install: - go get github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway - go get github.com/grpc-ecosystem/grpc-gateway/runtime @@ -23,10 +26,13 @@ before_script: - sh -c 'cd examples/browser && npm install' script: - make realclean && make examples SWAGGER_CODEGEN="java -jar $HOME/local/swagger-codegen-cli.jar" -- if ! go version | grep devel; then test -z "$(git status --porcelain)" || (git status; git diff; exit 1); fi +- if (go version | grep -q 1.8) && [ -z "${GATEWAY_PLUGIN_FLAGS}" ]; then test -z "$(git status --porcelain)" || (git status; git diff; exit 1); fi - env GLOG_logtostderr=1 go test -race -v github.com/grpc-ecosystem/grpc-gateway/... - make lint -- sh -c 'cd examples/browser && gulp' +- sh -c 'cd examples/browser && node ./node_modules/gulp/bin/gulp' env: global: - "PATH=$PATH:$HOME/local/bin" + matrix: + - GATEWAY_PLUGIN_FLAGS= + - GATEWAY_PLUGIN_FLAGS=request_context=true diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/grpc-gateway/CHANGELOG.md new file mode 100644 index 000000000000..3d613fec0848 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/CHANGELOG.md @@ -0,0 +1,169 @@ +# Change Log + +## [v1.2.2](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.2.2) (2017-04-17) +**Merged pull requests:** + +- metadata: fix properly and change to Outgoing [\#361](https://github.com/grpc-ecosystem/grpc-gateway/pull/361) + +## [v1.2.1](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.2.1) (2017-04-17) +**Merged pull requests:** + +- Add changelog for 1.2.1 [\#360](https://github.com/grpc-ecosystem/grpc-gateway/pull/360) +- bugfix: reflect upstream api change. [\#359](https://github.com/grpc-ecosystem/grpc-gateway/pull/359) + +## [v1.2.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.2.0) (2017-03-31) +**Merged pull requests:** + +- Add changelog for 1.2.0 [\#342](https://github.com/grpc-ecosystem/grpc-gateway/pull/342) + +## [v1.2.0.rc1](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.2.0.rc1) (2017-03-24) +**Merged pull requests:** + +- Update go\_out parameter to remove comma [\#333](https://github.com/grpc-ecosystem/grpc-gateway/pull/333) +- Update stale path in README [\#332](https://github.com/grpc-ecosystem/grpc-gateway/pull/332) +- improve documentation regarding external dependencies [\#330](https://github.com/grpc-ecosystem/grpc-gateway/pull/330) +- Return an error on invalid nested query parameters. 
[\#329](https://github.com/grpc-ecosystem/grpc-gateway/pull/329) +- Update upstream proto files and add google.golang.org/genproto support. [\#325](https://github.com/grpc-ecosystem/grpc-gateway/pull/325) +- Support oneof fields in query params [\#321](https://github.com/grpc-ecosystem/grpc-gateway/pull/321) +- Do not ignore the error coming from http.ListenAndServe in examples [\#319](https://github.com/grpc-ecosystem/grpc-gateway/pull/319) +- Look up enum value maps by their proto name [\#315](https://github.com/grpc-ecosystem/grpc-gateway/pull/315) +- enable parsing enums from query parameters [\#314](https://github.com/grpc-ecosystem/grpc-gateway/pull/314) +- Do not add imports from methods with no bindings. [\#312](https://github.com/grpc-ecosystem/grpc-gateway/pull/312) +- Convert the first letter of method name to upper [\#300](https://github.com/grpc-ecosystem/grpc-gateway/pull/300) +- write query parameters to swagger definition [\#297](https://github.com/grpc-ecosystem/grpc-gateway/pull/297) +- Bump swagger-client to 2.1.28 for examples/browser [\#290](https://github.com/grpc-ecosystem/grpc-gateway/pull/290) +- pin to version before es6ism [\#289](https://github.com/grpc-ecosystem/grpc-gateway/pull/289) +- Prevent lack of http bindings from generating non-building output [\#286](https://github.com/grpc-ecosystem/grpc-gateway/pull/286) +- Added support for Timestamp in URL. [\#281](https://github.com/grpc-ecosystem/grpc-gateway/pull/281) +- add plugin param 'allow\_delete\_body' [\#280](https://github.com/grpc-ecosystem/grpc-gateway/pull/280) +- Fix ruby gen command [\#275](https://github.com/grpc-ecosystem/grpc-gateway/pull/275) +- Make grpc-gateway support enum fields in path parameter [\#273](https://github.com/grpc-ecosystem/grpc-gateway/pull/273) +- remove unnecessary make\(\) [\#271](https://github.com/grpc-ecosystem/grpc-gateway/pull/271) +- preserve field order in swagger spec [\#270](https://github.com/grpc-ecosystem/grpc-gateway/pull/270) +- Merge \#228 [\#268](https://github.com/grpc-ecosystem/grpc-gateway/pull/268) +- Handle methods with no bindings more carefully [\#267](https://github.com/grpc-ecosystem/grpc-gateway/pull/267) +- describe default marshaler in README.md [\#266](https://github.com/grpc-ecosystem/grpc-gateway/pull/266) +- Add request\_context flag to utilize \(\*http.Request\).Context\(\) in handlers [\#265](https://github.com/grpc-ecosystem/grpc-gateway/pull/265) +- Regenerate examples [\#264](https://github.com/grpc-ecosystem/grpc-gateway/pull/264) +- Correct runtime.errorBody protobuf field tag [\#256](https://github.com/grpc-ecosystem/grpc-gateway/pull/256) +- Pass permanent HTTP request headers [\#252](https://github.com/grpc-ecosystem/grpc-gateway/pull/252) +- regenerate examples, fix tests for go tip [\#248](https://github.com/grpc-ecosystem/grpc-gateway/pull/248) +- Render the swagger request body properly [\#247](https://github.com/grpc-ecosystem/grpc-gateway/pull/247) +- Error output should have lowercase attribute names [\#244](https://github.com/grpc-ecosystem/grpc-gateway/pull/244) +- runtime - export prefix constants [\#236](https://github.com/grpc-ecosystem/grpc-gateway/pull/236) +- README - Add CoreOS example [\#231](https://github.com/grpc-ecosystem/grpc-gateway/pull/231) +- Docs - Add section about how HTTP maps to gRPC [\#227](https://github.com/grpc-ecosystem/grpc-gateway/pull/227) +- readme: added links to additional documentation [\#222](https://github.com/grpc-ecosystem/grpc-gateway/pull/222) +- Use a released version of protoc 
[\#216](https://github.com/grpc-ecosystem/grpc-gateway/pull/216) +- Add contribution guideline [\#210](https://github.com/grpc-ecosystem/grpc-gateway/pull/210) +- improve\(genswagger:template\):added support for google.protobuf.Timestamp [\#209](https://github.com/grpc-ecosystem/grpc-gateway/pull/209) +- Allowing unknown fields to be dropped instead of returning error from… [\#208](https://github.com/grpc-ecosystem/grpc-gateway/pull/208) +- Avoid Internal Server Error on zero-length input for bidi streaming [\#200](https://github.com/grpc-ecosystem/grpc-gateway/pull/200) + +## [v1.1.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.1.0) (2016-07-23) +**Merged pull requests:** + +- Rename packages to follow the repository transfer [\#192](https://github.com/grpc-ecosystem/grpc-gateway/pull/192) +- return err early if EOF to prevent logging in normal conditions [\#191](https://github.com/grpc-ecosystem/grpc-gateway/pull/191) +- send Trailer header on error [\#188](https://github.com/grpc-ecosystem/grpc-gateway/pull/188) +- generate swagger output for streaming endpoints with a basic note [\#183](https://github.com/grpc-ecosystem/grpc-gateway/pull/183) + +## [v1.0.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.0.0) (2016-06-15) +**Merged pull requests:** + +- Regenerate files with the latest protoc-gen-go [\#185](https://github.com/grpc-ecosystem/grpc-gateway/pull/185) +- Add browser examples [\#184](https://github.com/grpc-ecosystem/grpc-gateway/pull/184) +- Fix golint and go vet errors [\#182](https://github.com/grpc-ecosystem/grpc-gateway/pull/182) +- Add integration with clients generated by swagger-codegen [\#181](https://github.com/grpc-ecosystem/grpc-gateway/pull/181) +- Simplify example services [\#180](https://github.com/grpc-ecosystem/grpc-gateway/pull/180) +- Avoid errors when req.RemoteAddr is empty [\#178](https://github.com/grpc-ecosystem/grpc-gateway/pull/178) +- Feature/headers [\#176](https://github.com/grpc-ecosystem/grpc-gateway/pull/176) +- Include HTTP req.remoteAddr in gRPC ctx [\#174](https://github.com/grpc-ecosystem/grpc-gateway/pull/174) +- Update dependencies [\#171](https://github.com/grpc-ecosystem/grpc-gateway/pull/171) +- Add bidirectional streaming support by running Send\(\) and Recv\(\) concurrently [\#170](https://github.com/grpc-ecosystem/grpc-gateway/pull/170) +- make Authorization header check case-insensitive to comply with RFC 2616 4.2 [\#164](https://github.com/grpc-ecosystem/grpc-gateway/pull/164) +- jsonpb: avoid duplicating upstream's struct [\#158](https://github.com/grpc-ecosystem/grpc-gateway/pull/158) +- Generate Swagger description for service methods using proto comments. 
[\#156](https://github.com/grpc-ecosystem/grpc-gateway/pull/156) +- Implement gRPC timeout support for inbound HTTP headers [\#155](https://github.com/grpc-ecosystem/grpc-gateway/pull/155) +- Add more examples to marshalers [\#154](https://github.com/grpc-ecosystem/grpc-gateway/pull/154) +- custom marshaler: handle `Accept` headers correctly [\#152](https://github.com/grpc-ecosystem/grpc-gateway/pull/152) +- Simplify custom marshaler API [\#151](https://github.com/grpc-ecosystem/grpc-gateway/pull/151) +- Fix camel case path parameter handling in swagger [\#149](https://github.com/grpc-ecosystem/grpc-gateway/pull/149) +- Swagger dot in path template [\#148](https://github.com/grpc-ecosystem/grpc-gateway/pull/148) +- Support map types in swagger generator [\#147](https://github.com/grpc-ecosystem/grpc-gateway/pull/147) +- Cleanup custom marshaler [\#146](https://github.com/grpc-ecosystem/grpc-gateway/pull/146) +- Implement custom Marshaler support, add jsonpb implemention. [\#144](https://github.com/grpc-ecosystem/grpc-gateway/pull/144) +- Allow period in path URL templates when generating Swagger templates. [\#143](https://github.com/grpc-ecosystem/grpc-gateway/pull/143) +- Link to LICENSE.txt [\#142](https://github.com/grpc-ecosystem/grpc-gateway/pull/142) +- Support map types in swagger generator [\#141](https://github.com/grpc-ecosystem/grpc-gateway/pull/141) +- Conditionally stops checking if generated file are up-to-date [\#136](https://github.com/grpc-ecosystem/grpc-gateway/pull/136) +- Generate Swagger description for service methods using proto comments. [\#134](https://github.com/grpc-ecosystem/grpc-gateway/pull/134) +- Swagger definitions now have `type` set to `object`. [\#133](https://github.com/grpc-ecosystem/grpc-gateway/pull/133) +- go\_package option as go import path [\#129](https://github.com/grpc-ecosystem/grpc-gateway/pull/129) +- Fix govet errors [\#126](https://github.com/grpc-ecosystem/grpc-gateway/pull/126) +- Fix data-race in generated codes [\#125](https://github.com/grpc-ecosystem/grpc-gateway/pull/125) +- Fix \#119 - CloseNotify race with ServeHTTP [\#120](https://github.com/grpc-ecosystem/grpc-gateway/pull/120) +- Replace glog with grpclog [\#118](https://github.com/grpc-ecosystem/grpc-gateway/pull/118) +- Fix a goroutine-leak in HTTP keep-alive [\#116](https://github.com/grpc-ecosystem/grpc-gateway/pull/116) +- Fix camel case path parameter handling in swagger [\#114](https://github.com/grpc-ecosystem/grpc-gateway/pull/114) +- gofmt -s [\#112](https://github.com/grpc-ecosystem/grpc-gateway/pull/112) +- fix typo [\#111](https://github.com/grpc-ecosystem/grpc-gateway/pull/111) +- fix typo [\#110](https://github.com/grpc-ecosystem/grpc-gateway/pull/110) +- fixes missing swagger operation objects [\#109](https://github.com/grpc-ecosystem/grpc-gateway/pull/109) +- Add parser and swagger support for enum, no gengateway yet [\#108](https://github.com/grpc-ecosystem/grpc-gateway/pull/108) +- README: add protoc-gen-swagger too [\#105](https://github.com/grpc-ecosystem/grpc-gateway/pull/105) +- README: Suggest go get -u by default. 
[\#104](https://github.com/grpc-ecosystem/grpc-gateway/pull/104) +- Cancel context when HTTP connection is closed [\#102](https://github.com/grpc-ecosystem/grpc-gateway/pull/102) +- wait test server up [\#100](https://github.com/grpc-ecosystem/grpc-gateway/pull/100) +- Fix the swagger section of the README.md [\#98](https://github.com/grpc-ecosystem/grpc-gateway/pull/98) +- Add documentation for using Swagger [\#97](https://github.com/grpc-ecosystem/grpc-gateway/pull/97) +- Better compatibility to field names generated by protoc-gen-go [\#96](https://github.com/grpc-ecosystem/grpc-gateway/pull/96) +- Update protoc from 3.0.0-beta1 to 3.0.0-beta2 [\#95](https://github.com/grpc-ecosystem/grpc-gateway/pull/95) +- Better grpc error strings [\#94](https://github.com/grpc-ecosystem/grpc-gateway/pull/94) +- make available header and trailer metadata [\#93](https://github.com/grpc-ecosystem/grpc-gateway/pull/93) +- make grpc.DialOption configurable [\#89](https://github.com/grpc-ecosystem/grpc-gateway/pull/89) +- Add request in error handlers [\#88](https://github.com/grpc-ecosystem/grpc-gateway/pull/88) +- Improve PascalFromSnake behavior [\#85](https://github.com/grpc-ecosystem/grpc-gateway/pull/85) +- Typo grcp -\> grpc [\#81](https://github.com/grpc-ecosystem/grpc-gateway/pull/81) +- Add abstraction of code generator implementation [\#78](https://github.com/grpc-ecosystem/grpc-gateway/pull/78) +- Support multivalue of metadata [\#77](https://github.com/grpc-ecosystem/grpc-gateway/pull/77) +- Fix broken test [\#76](https://github.com/grpc-ecosystem/grpc-gateway/pull/76) +- Added missing instruction line in README [\#75](https://github.com/grpc-ecosystem/grpc-gateway/pull/75) +- Fix a complie error in generated go files [\#71](https://github.com/grpc-ecosystem/grpc-gateway/pull/71) +- Update generated .pb.go files in third\_party [\#69](https://github.com/grpc-ecosystem/grpc-gateway/pull/69) +- Add swagger support [\#68](https://github.com/grpc-ecosystem/grpc-gateway/pull/68) +- Bugfix/handling headers for `Authorization` and `Host` [\#65](https://github.com/grpc-ecosystem/grpc-gateway/pull/65) +- Fix `error` field always in chunk response [\#64](https://github.com/grpc-ecosystem/grpc-gateway/pull/64) +- Update .pb.go to latest version. [\#63](https://github.com/grpc-ecosystem/grpc-gateway/pull/63) +- Run more tests in Travis CI [\#60](https://github.com/grpc-ecosystem/grpc-gateway/pull/60) +- Added http error code and error status for responseStreamChunk error [\#59](https://github.com/grpc-ecosystem/grpc-gateway/pull/59) +- Fix parsing of verb and final path component. 
[\#55](https://github.com/grpc-ecosystem/grpc-gateway/pull/55) +- Add runtime.WithForwardResponseOption [\#53](https://github.com/grpc-ecosystem/grpc-gateway/pull/53) +- add grpc.WithInsecure\(\) as option for grpc.Dial call in template [\#52](https://github.com/grpc-ecosystem/grpc-gateway/pull/52) +- update .pb.go files for latest golang proto generation [\#51](https://github.com/grpc-ecosystem/grpc-gateway/pull/51) +- Fix a build error with the latest protoc-gen-go [\#50](https://github.com/grpc-ecosystem/grpc-gateway/pull/50) +- Configure Travis CI [\#49](https://github.com/grpc-ecosystem/grpc-gateway/pull/49) +- Follow a change of go package name convention in protoc-gen-go [\#48](https://github.com/grpc-ecosystem/grpc-gateway/pull/48) +- Consider tail segments after deep wildcard [\#47](https://github.com/grpc-ecosystem/grpc-gateway/pull/47) +- Fix typo in README [\#45](https://github.com/grpc-ecosystem/grpc-gateway/pull/45) +- Fix undefined variable error in generated codes [\#42](https://github.com/grpc-ecosystem/grpc-gateway/pull/42) +- Follow changes in protoc-gen-go and grpc-go [\#41](https://github.com/grpc-ecosystem/grpc-gateway/pull/41) +- Fixes \#4 [\#40](https://github.com/grpc-ecosystem/grpc-gateway/pull/40) +- fix examples to work with go1.5 [\#39](https://github.com/grpc-ecosystem/grpc-gateway/pull/39) +- rename internal to utilties for 1.5 compatibility [\#38](https://github.com/grpc-ecosystem/grpc-gateway/pull/38) +- Reflection fix of proto3 nested messages. [\#34](https://github.com/grpc-ecosystem/grpc-gateway/pull/34) +- \[Experimental\] Make the response forwarder function customizable [\#31](https://github.com/grpc-ecosystem/grpc-gateway/pull/31) +- Add f.Flush\(\) to runtime.ForwardResponseStream [\#30](https://github.com/grpc-ecosystem/grpc-gateway/pull/30) +- Format error message in JSON [\#29](https://github.com/grpc-ecosystem/grpc-gateway/pull/29) +- Update examples with HTTP header context annotation [\#28](https://github.com/grpc-ecosystem/grpc-gateway/pull/28) +- Report semantic errors in the source to protoc [\#27](https://github.com/grpc-ecosystem/grpc-gateway/pull/27) +- Add support for non-nullable nested messages. [\#21](https://github.com/grpc-ecosystem/grpc-gateway/pull/21) +- Receive GRPC metadata from HTTP headers. [\#18](https://github.com/grpc-ecosystem/grpc-gateway/pull/18) +- Implement detailed specs of google.api.http [\#14](https://github.com/grpc-ecosystem/grpc-gateway/pull/14) +- Configure travis CI [\#13](https://github.com/grpc-ecosystem/grpc-gateway/pull/13) +- Replace our own custom option with the one defined by Google [\#12](https://github.com/grpc-ecosystem/grpc-gateway/pull/12) +- Remove useless context setup [\#11](https://github.com/grpc-ecosystem/grpc-gateway/pull/11) +- Fix typo, path, missing semicolon. 
[\#10](https://github.com/grpc-ecosystem/grpc-gateway/pull/10) +- Use a globally unique id for the custom option [\#3](https://github.com/grpc-ecosystem/grpc-gateway/pull/3) +- implement ABitOfEverythingService [\#2](https://github.com/grpc-ecosystem/grpc-gateway/pull/2) +- support streaming API calls [\#1](https://github.com/grpc-ecosystem/grpc-gateway/pull/1) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/grpc-gateway/CONTRIBUTING.md index dbf0132094b4..94983bc97e22 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/CONTRIBUTING.md +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/CONTRIBUTING.md @@ -9,7 +9,7 @@ Here's the recommended process of contribution. 4. Make sure that your change follows best practices in Go * [Effective Go](https://golang.org/doc/effective_go.html) * [Go Code Review Comments](https://golang.org/wiki/CodeReviewComments) -5. Make sure that `make test` passes. +5. Make sure that `make test` passes. (use swagger-codegen 2.1.6, not newer versions) 6. Sign [a Contributor License Agreement](https://cla.developers.google.com/clas) 7. Open a pull request in Github diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/Makefile b/vendor/github.com/grpc-ecosystem/grpc-gateway/Makefile index 756f678aeaa0..c6a645e0ef7d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/Makefile +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/Makefile @@ -35,16 +35,22 @@ GATEWAY_PLUGIN_SRC= utilities/doc.go \ protoc-gen-grpc-gateway/httprule/parse.go \ protoc-gen-grpc-gateway/httprule/types.go \ protoc-gen-grpc-gateway/main.go +GATEWAY_PLUGIN_FLAGS?= GOOGLEAPIS_DIR=third_party/googleapis -OPTIONS_PROTO=$(GOOGLEAPIS_DIR)/google/api/annotations.proto $(GOOGLEAPIS_DIR)/google/api/http.proto -OPTIONS_GO=$(OPTIONS_PROTO:.proto=.pb.go) OUTPUT_DIR=_output RUNTIME_PROTO=runtime/internal/stream_chunk.proto RUNTIME_GO=$(RUNTIME_PROTO:.proto=.pb.go) -PKGMAP=Mgoogle/protobuf/descriptor.proto=$(GO_PLUGIN_PKG)/descriptor,Mgoogle/api/annotations.proto=$(PKG)/$(GOOGLEAPIS_DIR)/google/api,Mexamples/sub/message.proto=$(PKG)/examples/sub +OPENAPIV2_PROTO=protoc-gen-swagger/options/openapiv2.proto protoc-gen-swagger/options/annotations.proto +OPENAPIV2_GO=$(OPENAPIV2_PROTO:.proto=.pb.go) + +PKGMAP=Mgoogle/protobuf/descriptor.proto=$(GO_PLUGIN_PKG)/descriptor,Mexamples/sub/message.proto=$(PKG)/examples/sub +ADDITIONAL_FLAGS= +ifneq "$(GATEWAY_PLUGIN_FLAGS)" "" + ADDITIONAL_FLAGS=,$(GATEWAY_PLUGIN_FLAGS) +endif SWAGGER_EXAMPLES=examples/examplepb/echo_service.proto \ examples/examplepb/a_bit_of_everything.proto EXAMPLES=examples/examplepb/echo_service.proto \ @@ -76,23 +82,24 @@ SWAGGER_CODEGEN=swagger-codegen PROTOC_INC_PATH=$(dir $(shell which protoc))/../include -generate: $(OPTIONS_GO) $(RUNTIME_GO) +generate: $(RUNTIME_GO) .SUFFIXES: .go .proto -$(GO_PLUGIN): +$(GO_PLUGIN): go get $(GO_PLUGIN_PKG) go build -o $@ $(GO_PLUGIN_PKG) -$(OPTIONS_GO): $(OPTIONS_PROTO) $(GO_PLUGIN) - protoc -I $(PROTOC_INC_PATH) -I$(GOOGLEAPIS_DIR) --plugin=$(GO_PLUGIN) --go_out=$(PKGMAP):$(GOOGLEAPIS_DIR) $(OPTIONS_PROTO) $(RUNTIME_GO): $(RUNTIME_PROTO) $(GO_PLUGIN) protoc -I $(PROTOC_INC_PATH) --plugin=$(GO_PLUGIN) -I. --go_out=$(PKGMAP):. $(RUNTIME_PROTO) -$(GATEWAY_PLUGIN): $(OPTIONS_GO) $(RUNTIME_GO) $(GATEWAY_PLUGIN_SRC) +$(OPENAPIV2_GO): $(OPENAPIV2_PROTO) $(GO_PLUGIN) + protoc -I $(PROTOC_INC_PATH) --plugin=$(GO_PLUGIN) -I. 
--go_out=$(PKGMAP):$(GOPATH)/src $(OPENAPIV2_PROTO) + +$(GATEWAY_PLUGIN): $(RUNTIME_GO) $(GATEWAY_PLUGIN_SRC) go build -o $@ $(GATEWAY_PLUGIN_PKG) -$(SWAGGER_PLUGIN): $(OPTIONS_GO) $(SWAGGER_PLUGIN_SRC) +$(SWAGGER_PLUGIN): $(SWAGGER_PLUGIN_SRC) $(OPENAPIV2_GO) go build -o $@ $(SWAGGER_PLUGIN_PKG) $(EXAMPLE_SVCSRCS): $(GO_PLUGIN) $(EXAMPLES) @@ -102,18 +109,22 @@ $(EXAMPLE_DEPSRCS): $(GO_PLUGIN) $(EXAMPLE_DEPS) protoc -I $(PROTOC_INC_PATH) -I. --plugin=$(GO_PLUGIN) --go_out=$(PKGMAP),plugins=grpc:$(OUTPUT_DIR) $(@:.pb.go=.proto) cp $(OUTPUT_DIR)/$(PKG)/$@ $@ || cp $(OUTPUT_DIR)/$@ $@ $(EXAMPLE_GWSRCS): $(GATEWAY_PLUGIN) $(EXAMPLES) - protoc -I $(PROTOC_INC_PATH) -I. -I$(GOOGLEAPIS_DIR) --plugin=$(GATEWAY_PLUGIN) --grpc-gateway_out=logtostderr=true,$(PKGMAP):. $(EXAMPLES) + protoc -I $(PROTOC_INC_PATH) -I. -I$(GOOGLEAPIS_DIR) --plugin=$(GATEWAY_PLUGIN) --grpc-gateway_out=logtostderr=true,$(PKGMAP)$(ADDITIONAL_FLAGS):. $(EXAMPLES) $(EXAMPLE_SWAGGERSRCS): $(SWAGGER_PLUGIN) $(SWAGGER_EXAMPLES) protoc -I $(PROTOC_INC_PATH) -I. -I$(GOOGLEAPIS_DIR) --plugin=$(SWAGGER_PLUGIN) --swagger_out=logtostderr=true,$(PKGMAP):. $(SWAGGER_EXAMPLES) $(ECHO_EXAMPLE_SRCS): $(ECHO_EXAMPLE_SPEC) $(SWAGGER_CODEGEN) generate -i $(ECHO_EXAMPLE_SPEC) \ - -l go -o examples/clients --additional-properties packageName=echo - @rm -f $(EXAMPLE_CLIENT_DIR)/README.md $(EXAMPLE_CLIENT_DIR)/git_push.sh $(EXAMPLE_CLIENT_DIR)/.gitignore + -l go -o examples/clients/echo --additional-properties packageName=echo + @rm -f $(EXAMPLE_CLIENT_DIR)/echo/README.md \ + $(EXAMPLE_CLIENT_DIR)/echo/git_push.sh \ + $(EXAMPLE_CLIENT_DIR)/echo/.travis.yml $(ABE_EXAMPLE_SRCS): $(ABE_EXAMPLE_SPEC) $(SWAGGER_CODEGEN) generate -i $(ABE_EXAMPLE_SPEC) \ - -l go -o examples/clients --additional-properties packageName=abe - @rm -f $(EXAMPLE_CLIENT_DIR)/README.md $(EXAMPLE_CLIENT_DIR)/git_push.sh $(EXAMPLE_CLIENT_DIR)/.gitignore + -l go -o examples/clients/abe --additional-properties packageName=abe + @rm -f $(EXAMPLE_CLIENT_DIR)/abe/README.md \ + $(EXAMPLE_CLIENT_DIR)/abe/git_push.sh \ + $(EXAMPLE_CLIENT_DIR)/abe/.travis.yml examples: $(EXAMPLE_SVCSRCS) $(EXAMPLE_GWSRCS) $(EXAMPLE_DEPSRCS) $(EXAMPLE_SWAGGERSRCS) $(EXAMPLE_CLIENT_SRCS) test: examples @@ -132,12 +143,12 @@ lint: clean distclean: rm -f $(GATEWAY_PLUGIN) realclean: distclean - rm -f $(OPTIONS_GO) rm -f $(EXAMPLE_SVCSRCS) $(EXAMPLE_DEPSRCS) rm -f $(EXAMPLE_GWSRCS) rm -f $(EXAMPLE_SWAGGERSRCS) rm -f $(GO_PLUGIN) rm -f $(SWAGGER_PLUGIN) rm -f $(EXAMPLE_CLIENT_SRCS) + rm -f $(OPENAPIV2_GO) .PHONY: generate examples test lint clean distclean realclean diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/README.md b/vendor/github.com/grpc-ecosystem/grpc-gateway/README.md index 718047f32346..c7396cc154b2 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/README.md +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/README.md @@ -12,14 +12,10 @@ It helps you to provide your APIs in both gRPC and RESTful style at the same tim ![architecture introduction diagram](https://docs.google.com/drawings/d/12hp4CPqrNPFhattL_cIoJptFvlAqm5wLQ0ggqI5mkCg/pub?w=749&h=370) ## Background -gRPC is great -- it generates API clients and server stubs in many programming languages, -it is fast, easy-to-use, bandwidth-efficient and its design is combat-proven by Google. -However, you might still want to provide classic RESTful APIs too for some reasons -- -compatibility with languages not supported by gRPC, API backward-compatibility or aesthetics -of RESTful architecture. 
+gRPC is great -- it generates API clients and server stubs in many programming languages, it is fast, easy-to-use, bandwidth-efficient and its design is combat-proven by Google. +However, you might still want to provide a traditional RESTful API as well. Reasons can range from maintaining backwards-compatibility, supporting languages or clients not well supported by gRPC to simply maintaining the aesthetics and tooling involved with a RESTful architecture. -That's what grpc-gateway helps you to do. You just need to implement your gRPC service with a small amount of custom options. -Then the reverse-proxy generated by grpc-gateway serves RESTful API on top of the gRPC service. +This project aims to provide that HTTP+JSON interface to your gRPC service. A small amount of configuration in your service to attach HTTP semantics is all that's needed to generate a reverse-proxy with this library. ## Installation First you need to install ProtocolBuffers 3.0.0-beta-3 or later. @@ -36,7 +32,7 @@ make check sudo make install ``` -Then, `go get -u` as usual. +Then, `go get -u` as usual the following packages: ```sh go get -u github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway @@ -90,7 +86,7 @@ Make sure that your `$GOPATH/bin` is in your `$PATH`. protoc -I/usr/local/include -I. \ -I$GOPATH/src \ -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ - --go_out=Mgoogle/api/annotations.proto=github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api,plugins=grpc:. \ + --go_out=plugins=grpc:. \ path/to/your_service.proto ``` @@ -109,11 +105,13 @@ Make sure that your `$GOPATH/bin` is in your `$PATH`. protoc -I/usr/local/include -I. \ -I$GOPATH/src \ -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ - --plugin=protoc-gen-grpc-ruby=grpc_ruby_plugin \ + --plugin=protoc-gen-grpc=grpc_ruby_plugin \ --grpc-ruby_out=. \ path/to/your/service.proto ``` - 2. Implement your service + 2. Add the googleapis-common-protos gem (or your language equivalent) as a dependency to your project. + 3. Implement your service + 5. Generate reverse-proxy ```sh @@ -125,11 +123,15 @@ Make sure that your `$GOPATH/bin` is in your `$PATH`. ``` It will generate a reverse proxy `path/to/your_service.pb.gw.go`. + + Note: After generating the code for each of the stubs, in order to build the code, you will want to run ```go get .``` from the directory containing the stubs. + 6. Write an entrypoint Now you need to write an entrypoint of the proxy server. ```go package main + import ( "flag" "net/http" @@ -158,8 +160,7 @@ Make sure that your `$GOPATH/bin` is in your `$PATH`. return err } - http.ListenAndServe(":8080", mux) - return nil + return http.ListenAndServe(":8080", mux) } func main() { @@ -186,6 +187,9 @@ Make sure that your `$GOPATH/bin` is in your `$PATH`. `protoc-gen-grpc-gateway` supports custom mapping from Protobuf `import` to Golang import path. They are compatible to [the parameters with same names in `protoc-gen-go`](https://github.com/golang/protobuf#parameters). +In addition we also support the `request_context` parameter in order to use the `http.Request`'s Context (only for Go 1.7 and above). +This parameter can be useful to pass request scoped context between the gateway and the gRPC service. + `protoc-gen-grpc-gateway` also supports some more command line flags to control logging. You can give these flags together with parameters above. Run `protoc-gen-grpc-gateway --help` for more details about the flags. 
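The entrypoint in the hunk above appears only as diff fragments. For reference, here is a minimal self-contained sketch of the same pattern; the `gw` import path, the `RegisterYourServiceHandlerFromEndpoint` call, and the `localhost:9090`/`:8080` addresses are placeholders for whatever the gateway plugin generates for your own proto, not code from this patch.

```go
package main

import (
	"flag"
	"net/http"

	"github.com/golang/glog"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	gw "path/to/your_service_package" // hypothetical generated gateway package
)

var endpoint = flag.String("endpoint", "localhost:9090", "gRPC server endpoint")

func run() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The mux routes incoming REST requests to the gRPC backend.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := gw.RegisterYourServiceHandlerFromEndpoint(ctx, mux, *endpoint, opts); err != nil {
		return err
	}

	// As in the patched README, propagate the ListenAndServe error to main.
	return http.ListenAndServe(":8080", mux)
}

func main() {
	flag.Parse()
	defer glog.Flush()

	if err := run(); err != nil {
		glog.Fatal(err)
	}
}
```

Returning the `http.ListenAndServe` error rather than discarding it matches the change made in this hunk.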
## More Examples @@ -204,14 +208,15 @@ To use the same port for custom HTTP handlers (e.g. serving `swagger.json`), gRP * Method parameters in request body * Method parameters in request path * Method parameters in query string -* Mapping streaming APIs to JSON streams -* Mapping HTTP headers with `Grpc-Metadata-` prefix to gRPC metadata +* Enum fields in path parameter (including repeated enum fields). +* Mapping streaming APIs to newline-delimited JSON streams +* Mapping HTTP headers with `Grpc-Metadata-` prefix to gRPC metadata (prefixed with `grpcgateway-`) * Optionally emitting API definition for [Swagger](http://swagger.io). * Setting [gRPC timeouts](http://www.grpc.io/docs/guides/wire.html) through inbound HTTP `Grpc-Timeout` header. ### Want to support But not yet. -* bytes and enum fields in path parameter. #5 +* bytes fields in path parameter. #5 * Optionally generating the entrypoint. #8 * `import_path` parameter @@ -228,7 +233,9 @@ But patch is welcome. * HTTP request source IP is added as `X-Forwarded-For` gRPC request header * HTTP request host is added as `X-Forwarded-Host` gRPC request header * HTTP `Authorization` header is added as `authorization` gRPC request header -* Remaining HTTP header keys are prefixed with `Grpc-Metadata-` and added with their values to gRPC request header +* Remaining Permanent HTTP header keys (as specified by the IANA [here](http://www.iana.org/assignments/message-headers/message-headers.xhtml) are prefixed with `grpcgateway-` and added with their values to gRPC request header +* HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata (prefixed with `grpcgateway-`) +* While configurable, the default {un,}marshaling uses [jsonpb](https://godoc.org/github.com/golang/protobuf/jsonpb) with `OrigName: true`. # Contribution See [CONTRIBUTING.md](http://github.com/grpc-ecosystem/grpc-gateway/blob/master/CONTRIBUTING.md). 
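To make the header-to-metadata mapping described above concrete, here is a hedged sketch of a gRPC service handler reading the forwarded metadata on the server side. `echoServer`, `pb.SimpleMessage`, and the handler name are illustrative stand-ins rather than code from this patch, and the exact metadata keys follow the mapping rules listed above.

```go
package server

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"

	pb "path/to/your_service_package" // hypothetical generated stubs
)

type echoServer struct{}

// Echo logs whatever metadata the gateway attached before answering.
func (s *echoServer) Echo(ctx context.Context, req *pb.SimpleMessage) (*pb.SimpleMessage, error) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		// Keys arrive lower-cased; per the README above, Grpc-Metadata-*
		// headers and permanent headers such as Authorization show up here.
		for k, v := range md {
			log.Printf("gateway metadata %q = %q", k, v)
		}
	}
	return &pb.SimpleMessage{Id: req.Id}, nil
}
```

Reading metadata through `metadata.FromIncomingContext` keeps the service agnostic to whether a call arrived over native gRPC or through the gateway.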
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/browser/package.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/browser/package.json index c78bdcbf0c92..963f4cd6c306 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/browser/package.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/browser/package.json @@ -17,7 +17,7 @@ "gulp-shell": "^0.5.2", "jasmine": "^2.4.1", "phantomjs": "^2.1.7", - "swagger-client": "^2.1.14", + "swagger-client": "^2.1.28", "webpack-stream": "^3.2.0" } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/client_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/client_test.go index 0574d9fbb0c3..536a7a27fc2a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/client_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/client_test.go @@ -18,7 +18,7 @@ func TestEchoClient(t *testing.T) { } cl := echo.NewEchoServiceApiWithBasePath("http://localhost:8080") - resp, err := cl.Echo("foo") + resp, _, err := cl.Echo("foo") if err != nil { t.Errorf(`cl.Echo("foo") failed with %v; want success`, err) } @@ -35,7 +35,7 @@ func TestEchoBodyClient(t *testing.T) { cl := echo.NewEchoServiceApiWithBasePath("http://localhost:8080") req := echo.ExamplepbSimpleMessage{Id: "foo"} - resp, err := cl.EchoBody(req) + resp, _, err := cl.EchoBody(req) if err != nil { t.Errorf("cl.EchoBody(%#v) failed with %v; want success", req, err) } @@ -56,7 +56,7 @@ func TestAbitOfEverythingClient(t *testing.T) { } func testABEClientCreate(t *testing.T, cl *abe.ABitOfEverythingServiceApi) { - want := abe.ExamplepbABitOfEverything{ + want := &abe.ExamplepbABitOfEverything{ FloatValue: 1.5, DoubleValue: 2.5, Int64Value: "4294967296", @@ -73,7 +73,7 @@ func testABEClientCreate(t *testing.T, cl *abe.ABitOfEverythingServiceApi) { Sint64Value: "4611686018427387903", NonConventionalNameValue: "camelCase", } - resp, err := cl.Create( + resp, _, err := cl.Create( want.FloatValue, want.DoubleValue, want.Int64Value, @@ -148,7 +148,7 @@ func testABEClientCreateBody(t *testing.T, cl *abe.ABitOfEverythingServiceApi) { "b": {Name: "y", Amount: 2}, }, } - resp, err := cl.CreateBody(want) + resp, _, err := cl.CreateBody(want) if err != nil { t.Errorf("cl.CreateBody(%#v) failed with %v; want success", want, err) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.gitignore b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.gitignore new file mode 100644 index 000000000000..2f88269126dd --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.gitignore @@ -0,0 +1 @@ +/docs diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.swagger-codegen-ignore b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.swagger-codegen-ignore new file mode 100644 index 000000000000..6c7b69a01563 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/.swagger-codegen-ignore @@ -0,0 +1 @@ +.gitignore diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingNested.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingNested.go deleted file mode 100644 index ab15b89cfbd2..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingNested.go +++ /dev/null @@ -1,11 +0,0 @@ -package abe - -import ( -) - -type ABitOfEverythingNested struct { - Amount int64 
`json:"amount,omitempty"` - Name string `json:"name,omitempty"` - Ok NestedDeepEnum `json:"ok,omitempty"` - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingServiceApi.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingServiceApi.go deleted file mode 100644 index eb470763999a..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ABitOfEverythingServiceApi.go +++ /dev/null @@ -1,648 +0,0 @@ -package abe - -import ( - "strings" - "fmt" - "encoding/json" - "errors" - "github.com/dghubble/sling" -) - -type ABitOfEverythingServiceApi struct { - basePath string -} - -func NewABitOfEverythingServiceApi() *ABitOfEverythingServiceApi{ - return &ABitOfEverythingServiceApi { - basePath: "http://localhost", - } -} - -func NewABitOfEverythingServiceApiWithBasePath(basePath string) *ABitOfEverythingServiceApi{ - return &ABitOfEverythingServiceApi { - basePath: basePath, - } -} - -/** - * - * - * @param floatValue - * @param doubleValue - * @param int64Value - * @param uint64Value - * @param int32Value - * @param fixed64Value - * @param fixed32Value - * @param boolValue - * @param stringValue - * @param uint32Value - * @param sfixed32Value - * @param sfixed64Value - * @param sint32Value - * @param sint64Value - * @param nonConventionalNameValue - * @return ExamplepbABitOfEverything - */ -//func (a ABitOfEverythingServiceApi) Create (floatValue float32, doubleValue float64, int64Value string, uint64Value string, int32Value int32, fixed64Value string, fixed32Value int64, boolValue bool, stringValue string, uint32Value int64, sfixed32Value int32, sfixed64Value string, sint32Value int32, sint64Value string, nonConventionalNameValue string) (ExamplepbABitOfEverything, error) { -func (a ABitOfEverythingServiceApi) Create (floatValue float32, doubleValue float64, int64Value string, uint64Value string, int32Value int32, fixed64Value string, fixed32Value int64, boolValue bool, stringValue string, uint32Value int64, sfixed32Value int32, sfixed64Value string, sint32Value int32, sint64Value string, nonConventionalNameValue string) (ExamplepbABitOfEverything, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/{float_value}/{double_value}/{int64_value}/separator/{uint64_value}/{int32_value}/{fixed64_value}/{fixed32_value}/{bool_value}/{string_value}/{uint32_value}/{sfixed32_value}/{sfixed64_value}/{sint32_value}/{sint64_value}/{nonConventionalNameValue}" - path = strings.Replace(path, "{" + "float_value" + "}", fmt.Sprintf("%v", floatValue), -1) - path = strings.Replace(path, "{" + "double_value" + "}", fmt.Sprintf("%v", doubleValue), -1) - path = strings.Replace(path, "{" + "int64_value" + "}", fmt.Sprintf("%v", int64Value), -1) - path = strings.Replace(path, "{" + "uint64_value" + "}", fmt.Sprintf("%v", uint64Value), -1) - path = strings.Replace(path, "{" + "int32_value" + "}", fmt.Sprintf("%v", int32Value), -1) - path = strings.Replace(path, "{" + "fixed64_value" + "}", fmt.Sprintf("%v", fixed64Value), -1) - path = strings.Replace(path, "{" + "fixed32_value" + "}", fmt.Sprintf("%v", fixed32Value), -1) - path = strings.Replace(path, "{" + "bool_value" + "}", fmt.Sprintf("%v", boolValue), -1) - path = strings.Replace(path, "{" + "string_value" + "}", fmt.Sprintf("%v", stringValue), -1) - path = strings.Replace(path, "{" + "uint32_value" + "}", fmt.Sprintf("%v", uint32Value), -1) - path = strings.Replace(path, "{" + 
"sfixed32_value" + "}", fmt.Sprintf("%v", sfixed32Value), -1) - path = strings.Replace(path, "{" + "sfixed64_value" + "}", fmt.Sprintf("%v", sfixed64Value), -1) - path = strings.Replace(path, "{" + "sint32_value" + "}", fmt.Sprintf("%v", sint32Value), -1) - path = strings.Replace(path, "{" + "sint64_value" + "}", fmt.Sprintf("%v", sint64Value), -1) - path = strings.Replace(path, "{" + "nonConventionalNameValue" + "}", fmt.Sprintf("%v", nonConventionalNameValue), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(ExamplepbABitOfEverything) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param body - * @return ExamplepbABitOfEverything - */ -//func (a ABitOfEverythingServiceApi) CreateBody (body ExamplepbABitOfEverything) (ExamplepbABitOfEverything, error) { -func (a ABitOfEverythingServiceApi) CreateBody (body ExamplepbABitOfEverything) (ExamplepbABitOfEverything, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything" - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - -// body params - _sling = _sling.BodyJSON(body) - - var successPayload = new(ExamplepbABitOfEverything) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... 
probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param singleNestedName - * @param body - * @return ExamplepbABitOfEverything - */ -//func (a ABitOfEverythingServiceApi) DeepPathEcho (singleNestedName string, body ExamplepbABitOfEverything) (ExamplepbABitOfEverything, error) { -func (a ABitOfEverythingServiceApi) DeepPathEcho (singleNestedName string, body ExamplepbABitOfEverything) (ExamplepbABitOfEverything, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/{single_nested.name}" - path = strings.Replace(path, "{" + "single_nested.name" + "}", fmt.Sprintf("%v", singleNestedName), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - -// body params - _sling = _sling.BodyJSON(body) - - var successPayload = new(ExamplepbABitOfEverything) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... 
- err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param uuid - * @return ProtobufEmpty - */ -//func (a ABitOfEverythingServiceApi) Delete (uuid string) (ProtobufEmpty, error) { -func (a ABitOfEverythingServiceApi) Delete (uuid string) (ProtobufEmpty, error) { - - _sling := sling.New().Delete(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/{uuid}" - path = strings.Replace(path, "{" + "uuid" + "}", fmt.Sprintf("%v", uuid), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(ProtobufEmpty) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param value - * @return SubStringMessage - */ -//func (a ABitOfEverythingServiceApi) Echo (value string) (SubStringMessage, error) { -func (a ABitOfEverythingServiceApi) Echo (value string) (SubStringMessage, error) { - - _sling := sling.New().Get(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/echo/{value}" - path = strings.Replace(path, "{" + "value" + "}", fmt.Sprintf("%v", value), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(SubStringMessage) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. 
no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @return SubStringMessage - */ -//func (a ABitOfEverythingServiceApi) Echo_1 () (SubStringMessage, error) { -func (a ABitOfEverythingServiceApi) Echo_1 () (SubStringMessage, error) { - - _sling := sling.New().Get(a.basePath) - - // create path and map variables - path := "/v2/example/echo" - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(SubStringMessage) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param body - * @return SubStringMessage - */ -//func (a ABitOfEverythingServiceApi) Echo_2 (body string) (SubStringMessage, error) { -func (a ABitOfEverythingServiceApi) Echo_2 (body string) (SubStringMessage, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v2/example/echo" - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - -// body params - _sling = _sling.BodyJSON(body) - - var successPayload = new(SubStringMessage) - - // We use this map (below) so that any arbitrary error JSON can be handled. 
- // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param uuid - * @return ExamplepbABitOfEverything - */ -//func (a ABitOfEverythingServiceApi) Lookup (uuid string) (ExamplepbABitOfEverything, error) { -func (a ABitOfEverythingServiceApi) Lookup (uuid string) (ExamplepbABitOfEverything, error) { - - _sling := sling.New().Get(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/{uuid}" - path = strings.Replace(path, "{" + "uuid" + "}", fmt.Sprintf("%v", uuid), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(ExamplepbABitOfEverything) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... 
- err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @return ProtobufEmpty - */ -//func (a ABitOfEverythingServiceApi) Timeout () (ProtobufEmpty, error) { -func (a ABitOfEverythingServiceApi) Timeout () (ProtobufEmpty, error) { - - _sling := sling.New().Get(a.basePath) - - // create path and map variables - path := "/v2/example/timeout" - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(ProtobufEmpty) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * - * - * @param uuid - * @param body - * @return ProtobufEmpty - */ -//func (a ABitOfEverythingServiceApi) Update (uuid string, body ExamplepbABitOfEverything) (ProtobufEmpty, error) { -func (a ABitOfEverythingServiceApi) Update (uuid string, body ExamplepbABitOfEverything) (ProtobufEmpty, error) { - - _sling := sling.New().Put(a.basePath) - - // create path and map variables - path := "/v1/example/a_bit_of_everything/{uuid}" - path = strings.Replace(path, "{" + "uuid" + "}", fmt.Sprintf("%v", uuid), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - -// body params - _sling = _sling.BodyJSON(body) - - var successPayload = new(ProtobufEmpty) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. 
no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbABitOfEverything.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbABitOfEverything.go deleted file mode 100644 index 2ca345dcc089..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbABitOfEverything.go +++ /dev/null @@ -1,35 +0,0 @@ -package abe - -import ( - "time" -) - -type ExamplepbABitOfEverything struct { - BoolValue bool `json:"bool_value,omitempty"` - DoubleValue float64 `json:"double_value,omitempty"` - EnumValue ExamplepbNumericEnum `json:"enum_value,omitempty"` - Fixed32Value int64 `json:"fixed32_value,omitempty"` - Fixed64Value string `json:"fixed64_value,omitempty"` - FloatValue float32 `json:"float_value,omitempty"` - Int32Value int32 `json:"int32_value,omitempty"` - Int64Value string `json:"int64_value,omitempty"` - MapValue map[string]ExamplepbNumericEnum `json:"map_value,omitempty"` - MappedNestedValue map[string]ABitOfEverythingNested `json:"mapped_nested_value,omitempty"` - MappedStringValue map[string]string `json:"mapped_string_value,omitempty"` - Nested []ABitOfEverythingNested `json:"nested,omitempty"` - NonConventionalNameValue string `json:"nonConventionalNameValue,omitempty"` - OneofEmpty ProtobufEmpty `json:"oneof_empty,omitempty"` - OneofString string `json:"oneof_string,omitempty"` - RepeatedStringValue []string `json:"repeated_string_value,omitempty"` - Sfixed32Value int32 `json:"sfixed32_value,omitempty"` - Sfixed64Value string `json:"sfixed64_value,omitempty"` - SingleNested ABitOfEverythingNested `json:"single_nested,omitempty"` - Sint32Value int32 `json:"sint32_value,omitempty"` - Sint64Value string `json:"sint64_value,omitempty"` - StringValue string `json:"string_value,omitempty"` - TimestampValue time.Time `json:"timestamp_value,omitempty"` - Uint32Value int64 `json:"uint32_value,omitempty"` - Uint64Value string `json:"uint64_value,omitempty"` - Uuid string `json:"uuid,omitempty"` - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbNumericEnum.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbNumericEnum.go deleted file mode 100644 index 51cffc77f699..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ExamplepbNumericEnum.go +++ /dev/null @@ -1,8 +0,0 @@ -package abe - -import ( -) - -type ExamplepbNumericEnum struct { - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/NestedDeepEnum.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/NestedDeepEnum.go deleted file mode 100644 index 
b48e231216c5..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/NestedDeepEnum.go +++ /dev/null @@ -1,8 +0,0 @@ -package abe - -import ( -) - -type NestedDeepEnum struct { - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufDuration.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufDuration.go new file mode 100644 index 000000000000..837f298bd91a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufDuration.go @@ -0,0 +1,10 @@ +package abe + +import ( +) + +type ProtobufDuration struct { + Seconds string `json:"seconds,omitempty"` + Nanos int32 `json:"nanos,omitempty"` + +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufEmpty.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufEmpty.go deleted file mode 100644 index ac37afeb8623..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/ProtobufEmpty.go +++ /dev/null @@ -1,8 +0,0 @@ -package abe - -import ( -) - -type ProtobufEmpty struct { - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/Sub2IdMessage.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/Sub2IdMessage.go deleted file mode 100644 index 77ebe152ba00..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/Sub2IdMessage.go +++ /dev/null @@ -1,9 +0,0 @@ -package abe - -import ( -) - -type Sub2IdMessage struct { - Uuid string `json:"uuid,omitempty"` - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/SubStringMessage.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/SubStringMessage.go deleted file mode 100644 index f278dbf258b1..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/SubStringMessage.go +++ /dev/null @@ -1,9 +0,0 @@ -package abe - -import ( -) - -type SubStringMessage struct { - Value string `json:"value,omitempty"` - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_nested.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_nested.go new file mode 100644 index 000000000000..095e8c5f1188 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_nested.go @@ -0,0 +1,22 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +// Nested is nested type. +type ABitOfEverythingNested struct { + + // name is nested field. 
+ Name string `json:"name,omitempty"` + + Amount int64 `json:"amount,omitempty"` + + Ok NestedDeepEnum `json:"ok,omitempty"` +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_service_api.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_service_api.go new file mode 100644 index 000000000000..d66b34f54b2e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/a_bit_of_everything_service_api.go @@ -0,0 +1,798 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +import ( + "net/url" + "strings" + "time" + "encoding/json" + "fmt" +) + +type ABitOfEverythingServiceApi struct { + Configuration *Configuration +} + +func NewABitOfEverythingServiceApi() *ABitOfEverythingServiceApi { + configuration := NewConfiguration() + return &ABitOfEverythingServiceApi{ + Configuration: configuration, + } +} + +func NewABitOfEverythingServiceApiWithBasePath(basePath string) *ABitOfEverythingServiceApi { + configuration := NewConfiguration() + configuration.BasePath = basePath + + return &ABitOfEverythingServiceApi{ + Configuration: configuration, + } +} + +/** + * + * + * @param floatValue + * @param doubleValue + * @param int64Value + * @param uint64Value + * @param int32Value + * @param fixed64Value + * @param fixed32Value + * @param boolValue + * @param stringValue + * @param uint32Value + * @param sfixed32Value + * @param sfixed64Value + * @param sint32Value + * @param sint64Value + * @param nonConventionalNameValue + * @return *ExamplepbABitOfEverything + */ +func (a ABitOfEverythingServiceApi) Create(floatValue float32, doubleValue float64, int64Value string, uint64Value string, int32Value int32, fixed64Value string, fixed32Value int64, boolValue bool, stringValue string, uint32Value int64, sfixed32Value int32, sfixed64Value string, sint32Value int32, sint64Value string, nonConventionalNameValue string) (*ExamplepbABitOfEverything, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/{float_value}/{double_value}/{int64_value}/separator/{uint64_value}/{int32_value}/{fixed64_value}/{fixed32_value}/{bool_value}/{string_value}/{uint32_value}/{sfixed32_value}/{sfixed64_value}/{sint32_value}/{sint64_value}/{nonConventionalNameValue}" + localVarPath = strings.Replace(localVarPath, "{"+"float_value"+"}", fmt.Sprintf("%v", floatValue), -1) + localVarPath = strings.Replace(localVarPath, "{"+"double_value"+"}", fmt.Sprintf("%v", doubleValue), -1) + localVarPath = strings.Replace(localVarPath, "{"+"int64_value"+"}", fmt.Sprintf("%v", int64Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"uint64_value"+"}", fmt.Sprintf("%v", uint64Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"int32_value"+"}", fmt.Sprintf("%v", int32Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"fixed64_value"+"}", fmt.Sprintf("%v", fixed64Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"fixed32_value"+"}", fmt.Sprintf("%v", fixed32Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"bool_value"+"}", fmt.Sprintf("%v", boolValue), -1) + localVarPath = strings.Replace(localVarPath, 
"{"+"string_value"+"}", fmt.Sprintf("%v", stringValue), -1) + localVarPath = strings.Replace(localVarPath, "{"+"uint32_value"+"}", fmt.Sprintf("%v", uint32Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"sfixed32_value"+"}", fmt.Sprintf("%v", sfixed32Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"sfixed64_value"+"}", fmt.Sprintf("%v", sfixed64Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"sint32_value"+"}", fmt.Sprintf("%v", sint32Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"sint64_value"+"}", fmt.Sprintf("%v", sint64Value), -1) + localVarPath = strings.Replace(localVarPath, "{"+"nonConventionalNameValue"+"}", fmt.Sprintf("%v", nonConventionalNameValue), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ExamplepbABitOfEverything) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Create", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param body + * @return *ExamplepbABitOfEverything + */ +func (a ABitOfEverythingServiceApi) CreateBody(body ExamplepbABitOfEverything) (*ExamplepbABitOfEverything, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := 
[]string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + // body params + localVarPostBody = &body + var successPayload = new(ExamplepbABitOfEverything) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "CreateBody", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param singleNestedName + * @param body + * @return *ExamplepbABitOfEverything + */ +func (a ABitOfEverythingServiceApi) DeepPathEcho(singleNestedName string, body ExamplepbABitOfEverything) (*ExamplepbABitOfEverything, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/{single_nested.name}" + localVarPath = strings.Replace(localVarPath, "{"+"single_nested.name"+"}", fmt.Sprintf("%v", singleNestedName), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + // body params + localVarPostBody = &body + var successPayload = new(ExamplepbABitOfEverything) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, 
localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "DeepPathEcho", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param uuid + * @return *ProtobufEmpty + */ +func (a ABitOfEverythingServiceApi) Delete(uuid string) (*ProtobufEmpty, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Delete") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/{uuid}" + localVarPath = strings.Replace(localVarPath, "{"+"uuid"+"}", fmt.Sprintf("%v", uuid), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ProtobufEmpty) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Delete", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * Echo allows posting a StringMessage value. + * It also exposes multiple bindings. This makes it useful when validating that the OpenAPI v2 API description exposes documentation correctly on all paths defined as additional_bindings in the proto. 
+ * + * @param value + * @return *SubStringMessage + */ +func (a ABitOfEverythingServiceApi) Echo(value string) (*SubStringMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/echo/{value}" + localVarPath = strings.Replace(localVarPath, "{"+"value"+"}", fmt.Sprintf("%v", value), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(SubStringMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Echo", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * Echo allows posting a StringMessage value. + * It also exposes multiple bindings. This makes it useful when validating that the OpenAPI v2 API description exposes documentation correctly on all paths defined as additional_bindings in the proto. 
+ * + * @param body + * @return *SubStringMessage + */ +func (a ABitOfEverythingServiceApi) Echo2(body string) (*SubStringMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v2/example/echo" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + // body params + localVarPostBody = &body + var successPayload = new(SubStringMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Echo2", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * Echo allows posting a StringMessage value. + * It also exposes multiple bindings. This makes it useful when validating that the OpenAPI v2 API description exposes documentation correctly on all paths defined as additional_bindings in the proto. 
+ * + * @param value + * @return *SubStringMessage + */ +func (a ABitOfEverythingServiceApi) Echo3(value string) (*SubStringMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v2/example/echo" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + localVarQueryParams.Add("value", a.Configuration.APIClient.ParameterToString(value, "")) + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(SubStringMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Echo3", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param uuid + * @param singleNestedName name is nested field. + * @param singleNestedAmount + * @param singleNestedOk - FALSE: FALSE is false. - TRUE: TRUE is true. + * @param floatValue + * @param doubleValue + * @param int64Value + * @param uint64Value + * @param int32Value + * @param fixed64Value + * @param fixed32Value + * @param boolValue + * @param stringValue + * @param uint32Value TODO(yugui) add bytes_value. + * @param enumValue - ZERO: ZERO means 0 - ONE: ONE means 1 + * @param sfixed32Value + * @param sfixed64Value + * @param sint32Value + * @param sint64Value + * @param repeatedStringValue + * @param oneofString + * @param nonConventionalNameValue + * @param timestampValue + * @param repeatedEnumValue repeated enum value. it is comma-separated in query. 
- ZERO: ZERO means 0 - ONE: ONE means 1 + * @return *ProtobufEmpty + */ +func (a ABitOfEverythingServiceApi) GetQuery(uuid string, singleNestedName string, singleNestedAmount int64, singleNestedOk string, floatValue float32, doubleValue float64, int64Value string, uint64Value string, int32Value int32, fixed64Value string, fixed32Value int64, boolValue bool, stringValue string, uint32Value int64, enumValue string, sfixed32Value int32, sfixed64Value string, sint32Value int32, sint64Value string, repeatedStringValue []string, oneofString string, nonConventionalNameValue string, timestampValue time.Time, repeatedEnumValue []string) (*ProtobufEmpty, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/query/{uuid}" + localVarPath = strings.Replace(localVarPath, "{"+"uuid"+"}", fmt.Sprintf("%v", uuid), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + localVarQueryParams.Add("single_nested.name", a.Configuration.APIClient.ParameterToString(singleNestedName, "")) + localVarQueryParams.Add("single_nested.amount", a.Configuration.APIClient.ParameterToString(singleNestedAmount, "")) + localVarQueryParams.Add("single_nested.ok", a.Configuration.APIClient.ParameterToString(singleNestedOk, "")) + localVarQueryParams.Add("float_value", a.Configuration.APIClient.ParameterToString(floatValue, "")) + localVarQueryParams.Add("double_value", a.Configuration.APIClient.ParameterToString(doubleValue, "")) + localVarQueryParams.Add("int64_value", a.Configuration.APIClient.ParameterToString(int64Value, "")) + localVarQueryParams.Add("uint64_value", a.Configuration.APIClient.ParameterToString(uint64Value, "")) + localVarQueryParams.Add("int32_value", a.Configuration.APIClient.ParameterToString(int32Value, "")) + localVarQueryParams.Add("fixed64_value", a.Configuration.APIClient.ParameterToString(fixed64Value, "")) + localVarQueryParams.Add("fixed32_value", a.Configuration.APIClient.ParameterToString(fixed32Value, "")) + localVarQueryParams.Add("bool_value", a.Configuration.APIClient.ParameterToString(boolValue, "")) + localVarQueryParams.Add("string_value", a.Configuration.APIClient.ParameterToString(stringValue, "")) + localVarQueryParams.Add("uint32_value", a.Configuration.APIClient.ParameterToString(uint32Value, "")) + localVarQueryParams.Add("enum_value", a.Configuration.APIClient.ParameterToString(enumValue, "")) + localVarQueryParams.Add("sfixed32_value", a.Configuration.APIClient.ParameterToString(sfixed32Value, "")) + localVarQueryParams.Add("sfixed64_value", a.Configuration.APIClient.ParameterToString(sfixed64Value, "")) + localVarQueryParams.Add("sint32_value", a.Configuration.APIClient.ParameterToString(sint32Value, "")) + localVarQueryParams.Add("sint64_value", a.Configuration.APIClient.ParameterToString(sint64Value, "")) + var repeatedStringValueCollectionFormat = "csv" + localVarQueryParams.Add("repeated_string_value", a.Configuration.APIClient.ParameterToString(repeatedStringValue, repeatedStringValueCollectionFormat)) + + localVarQueryParams.Add("oneof_string", a.Configuration.APIClient.ParameterToString(oneofString, "")) + 
localVarQueryParams.Add("nonConventionalNameValue", a.Configuration.APIClient.ParameterToString(nonConventionalNameValue, "")) + localVarQueryParams.Add("timestamp_value", a.Configuration.APIClient.ParameterToString(timestampValue, "")) + var repeatedEnumValueCollectionFormat = "csv" + localVarQueryParams.Add("repeated_enum_value", a.Configuration.APIClient.ParameterToString(repeatedEnumValue, repeatedEnumValueCollectionFormat)) + + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ProtobufEmpty) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "GetQuery", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param uuid + * @return *ExamplepbABitOfEverything + */ +func (a ABitOfEverythingServiceApi) Lookup(uuid string) (*ExamplepbABitOfEverything, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/a_bit_of_everything/{uuid}" + localVarPath = strings.Replace(localVarPath, "{"+"uuid"+"}", fmt.Sprintf("%v", uuid), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + 
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ExamplepbABitOfEverything) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Lookup", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @return *ProtobufEmpty + */ +func (a ABitOfEverythingServiceApi) Timeout() (*ProtobufEmpty, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v2/example/timeout" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ProtobufEmpty) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Timeout", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * + * + * @param uuid + * @param body + * @return *ProtobufEmpty + */ +func (a ABitOfEverythingServiceApi) Update(uuid string, body ExamplepbABitOfEverything) (*ProtobufEmpty, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Put") + // create path and map variables + localVarPath := a.Configuration.BasePath + 
"/v1/example/a_bit_of_everything/{uuid}" + localVarPath = strings.Replace(localVarPath, "{"+"uuid"+"}", fmt.Sprintf("%v", uuid), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", "application/x-foo-mime", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + "application/x-foo-mime", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + // body params + localVarPostBody = &body + var successPayload = new(ProtobufEmpty) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Update", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_client.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_client.go new file mode 100644 index 000000000000..bf3e21a9fb10 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_client.go @@ -0,0 +1,164 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +import ( + "bytes" + "fmt" + "path/filepath" + "reflect" + "strings" + "net/url" + "io/ioutil" + "github.com/go-resty/resty" +) + +type APIClient struct { + config *Configuration +} + +func (c *APIClient) SelectHeaderContentType(contentTypes []string) string { + + if len(contentTypes) == 0 { + return "" + } + if contains(contentTypes, "application/json") { + return "application/json" + } + return contentTypes[0] // use the first content type specified in 'consumes' +} + +func (c *APIClient) SelectHeaderAccept(accepts []string) string { + + if len(accepts) == 0 { + return "" + } + if contains(accepts, "application/json") { + return "application/json" + } + return strings.Join(accepts, ",") +} + +func contains(haystack []string, needle string) bool { + for 
_, a := range haystack { + if strings.ToLower(a) == strings.ToLower(needle) { + return true + } + } + return false +} + +func (c *APIClient) CallAPI(path string, method string, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams map[string]string, + fileName string, + fileBytes []byte) (*resty.Response, error) { + + rClient := c.prepareClient() + request := c.prepareRequest(rClient, postBody, headerParams, queryParams, formParams, fileName, fileBytes) + + switch strings.ToUpper(method) { + case "GET": + response, err := request.Get(path) + return response, err + case "POST": + response, err := request.Post(path) + return response, err + case "PUT": + response, err := request.Put(path) + return response, err + case "PATCH": + response, err := request.Patch(path) + return response, err + case "DELETE": + response, err := request.Delete(path) + return response, err + } + + return nil, fmt.Errorf("invalid method %v", method) +} + +func (c *APIClient) ParameterToString(obj interface{}, collectionFormat string) string { + delimiter := "" + switch collectionFormat { + case "pipes": + delimiter = "|" + case "ssv": + delimiter = " " + case "tsv": + delimiter = "\t" + case "csv": + delimiter = "," + } + + if reflect.TypeOf(obj).Kind() == reflect.Slice { + return strings.Trim(strings.Replace(fmt.Sprint(obj), " ", delimiter, -1), "[]") + } + + return fmt.Sprintf("%v", obj) +} + +func (c *APIClient) prepareClient() *resty.Client { + + rClient := resty.New() + + rClient.SetDebug(c.config.Debug) + if c.config.Transport != nil { + rClient.SetTransport(c.config.Transport) + } + + if c.config.Timeout != nil { + rClient.SetTimeout(*c.config.Timeout) + } + rClient.SetLogger(ioutil.Discard) + return rClient +} + +func (c *APIClient) prepareRequest( + rClient *resty.Client, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams map[string]string, + fileName string, + fileBytes []byte) *resty.Request { + + + request := rClient.R() + request.SetBody(postBody) + + if c.config.UserAgent != "" { + request.SetHeader("User-Agent", c.config.UserAgent) + } + + // add header parameter, if any + if len(headerParams) > 0 { + request.SetHeaders(headerParams) + } + + // add query parameter, if any + if len(queryParams) > 0 { + request.SetMultiValueQueryParams(queryParams) + } + + // add form parameter, if any + if len(formParams) > 0 { + request.SetFormData(formParams) + } + + if len(fileBytes) > 0 && fileName != "" { + _, fileNm := filepath.Split(fileName) + request.SetFileReader("file", fileNm, bytes.NewReader(fileBytes)) + } + return request +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_response.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_response.go new file mode 100644 index 000000000000..ee1315f513c5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/api_response.go @@ -0,0 +1,44 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +import ( + "net/http" +) + +type APIResponse struct { + *http.Response `json:"-"` + Message string `json:"message,omitempty"` + // Operation is the name of the swagger operation. 
+ Operation string `json:"operation,omitempty"` + // RequestURL is the request URL. This value is always available, even if the + // embedded *http.Response is nil. + RequestURL string `json:"url,omitempty"` + // Method is the HTTP method used for the request. This value is always + // available, even if the embedded *http.Response is nil. + Method string `json:"method,omitempty"` + // Payload holds the contents of the response body (which may be nil or empty). + // This is provided here as the raw response.Body() reader will have already + // been drained. + Payload []byte `json:"-"` +} + +func NewAPIResponse(r *http.Response) *APIResponse { + + response := &APIResponse{Response: r} + return response +} + +func NewAPIResponseWithError(errorMessage string) *APIResponse { + + response := &APIResponse{Message: errorMessage} + return response +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/configuration.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/configuration.go new file mode 100644 index 000000000000..ccc319c34aaf --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/configuration.go @@ -0,0 +1,67 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +import ( + "encoding/base64" + "net/http" + "time" +) + + +type Configuration struct { + Username string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` + APIKeyPrefix map[string]string `json:"APIKeyPrefix,omitempty"` + APIKey map[string]string `json:"APIKey,omitempty"` + Debug bool `json:"debug,omitempty"` + DebugFile string `json:"debugFile,omitempty"` + OAuthToken string `json:"oAuthToken,omitempty"` + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + AccessToken string `json:"accessToken,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + APIClient *APIClient + Transport *http.Transport + Timeout *time.Duration `json:"timeout,omitempty"` +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "http://localhost", + DefaultHeader: make(map[string]string), + APIKey: make(map[string]string), + APIKeyPrefix: make(map[string]string), + UserAgent: "Swagger-Codegen/1.0.0/go", + APIClient: &APIClient{}, + } + + cfg.APIClient.config = cfg + return cfg +} + +func (c *Configuration) GetBasicAuthEncodedString() string { + return base64.StdEncoding.EncodeToString([]byte(c.Username + ":" + c.Password)) +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} + +func (c *Configuration) GetAPIKeyWithPrefix(APIKeyIdentifier string) string { + if c.APIKeyPrefix[APIKeyIdentifier] != "" { + return c.APIKeyPrefix[APIKeyIdentifier] + " " + c.APIKey[APIKeyIdentifier] + } + + return c.APIKey[APIKeyIdentifier] +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_a_bit_of_everything.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_a_bit_of_everything.go new file mode 100644 index 000000000000..d8775d9c8632 --- /dev/null +++ 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_a_bit_of_everything.go @@ -0,0 +1,72 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +import ( + "time" +) + +type ExamplepbABitOfEverything struct { + + SingleNested ABitOfEverythingNested `json:"single_nested,omitempty"` + + Uuid string `json:"uuid,omitempty"` + + Nested []ABitOfEverythingNested `json:"nested,omitempty"` + + FloatValue float32 `json:"float_value,omitempty"` + + DoubleValue float64 `json:"double_value,omitempty"` + + Int64Value string `json:"int64_value,omitempty"` + + Uint64Value string `json:"uint64_value,omitempty"` + + Int32Value int32 `json:"int32_value,omitempty"` + + Fixed64Value string `json:"fixed64_value,omitempty"` + + Fixed32Value int64 `json:"fixed32_value,omitempty"` + + BoolValue bool `json:"bool_value,omitempty"` + + StringValue string `json:"string_value,omitempty"` + + Uint32Value int64 `json:"uint32_value,omitempty"` + + EnumValue ExamplepbNumericEnum `json:"enum_value,omitempty"` + + Sfixed32Value int32 `json:"sfixed32_value,omitempty"` + + Sfixed64Value string `json:"sfixed64_value,omitempty"` + + Sint32Value int32 `json:"sint32_value,omitempty"` + + Sint64Value string `json:"sint64_value,omitempty"` + + RepeatedStringValue []string `json:"repeated_string_value,omitempty"` + + OneofEmpty ProtobufEmpty `json:"oneof_empty,omitempty"` + + OneofString string `json:"oneof_string,omitempty"` + + MapValue map[string]ExamplepbNumericEnum `json:"map_value,omitempty"` + + MappedStringValue map[string]string `json:"mapped_string_value,omitempty"` + + MappedNestedValue map[string]ABitOfEverythingNested `json:"mapped_nested_value,omitempty"` + + NonConventionalNameValue string `json:"nonConventionalNameValue,omitempty"` + + TimestampValue time.Time `json:"timestamp_value,omitempty"` + + RepeatedEnumValue []ExamplepbNumericEnum `json:"repeated_enum_value,omitempty"` +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_numeric_enum.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_numeric_enum.go new file mode 100644 index 000000000000..e953bbe34e83 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/examplepb_numeric_enum.go @@ -0,0 +1,15 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +// NumericEnum is one or zero. 
- ZERO: ZERO means 0 - ONE: ONE means 1 +type ExamplepbNumericEnum struct { +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/nested_deep_enum.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/nested_deep_enum.go new file mode 100644 index 000000000000..e5fc17d50a4a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/nested_deep_enum.go @@ -0,0 +1,15 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +// DeepEnum is one or zero. - FALSE: FALSE is false. - TRUE: TRUE is true. +type NestedDeepEnum struct { +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/protobuf_empty.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/protobuf_empty.go new file mode 100644 index 000000000000..97c7bf612b80 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/protobuf_empty.go @@ -0,0 +1,15 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +// service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. +type ProtobufEmpty struct { +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/sub_string_message.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/sub_string_message.go new file mode 100644 index 000000000000..2a0874fc5fad --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe/sub_string_message.go @@ -0,0 +1,16 @@ +/* + * A Bit of Everything + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * OpenAPI spec version: 1.0 + * Contact: none@example.com + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package abe + +type SubStringMessage struct { + + Value string `json:"value,omitempty"` +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.gitignore b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.gitignore new file mode 100644 index 000000000000..2f88269126dd --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.gitignore @@ -0,0 +1 @@ +/docs diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.swagger-codegen-ignore b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.swagger-codegen-ignore new file mode 100644 index 000000000000..6c7b69a01563 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/.swagger-codegen-ignore @@ -0,0 +1 @@ +.gitignore diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/EchoServiceApi.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/EchoServiceApi.go deleted file mode 100644 index 0cf5b582740c..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/EchoServiceApi.go +++ /dev/null @@ -1,145 +0,0 @@ -package echo - -import 
( - "strings" - "fmt" - "encoding/json" - "errors" - "github.com/dghubble/sling" -) - -type EchoServiceApi struct { - basePath string -} - -func NewEchoServiceApi() *EchoServiceApi{ - return &EchoServiceApi { - basePath: "http://localhost", - } -} - -func NewEchoServiceApiWithBasePath(basePath string) *EchoServiceApi{ - return &EchoServiceApi { - basePath: basePath, - } -} - -/** - * Echo method receives a simple message and returns it. - * The message posted as the id parameter will also be\nreturned. - * @param id - * @return ExamplepbSimpleMessage - */ -//func (a EchoServiceApi) Echo (id string) (ExamplepbSimpleMessage, error) { -func (a EchoServiceApi) Echo (id string) (ExamplepbSimpleMessage, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v1/example/echo/{id}" - path = strings.Replace(path, "{" + "id" + "}", fmt.Sprintf("%v", id), -1) - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - - - var successPayload = new(ExamplepbSimpleMessage) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. - var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} -/** - * EchoBody method receives a simple message and returns it. - * - * @param body - * @return ExamplepbSimpleMessage - */ -//func (a EchoServiceApi) EchoBody (body ExamplepbSimpleMessage) (ExamplepbSimpleMessage, error) { -func (a EchoServiceApi) EchoBody (body ExamplepbSimpleMessage) (ExamplepbSimpleMessage, error) { - - _sling := sling.New().Post(a.basePath) - - // create path and map variables - path := "/v1/example/echo_body" - - _sling = _sling.Path(path) - - // accept header - accepts := []string { "application/json" } - for key := range accepts { - _sling = _sling.Set("Accept", accepts[key]) - break // only use the first Accept - } - -// body params - _sling = _sling.BodyJSON(body) - - var successPayload = new(ExamplepbSimpleMessage) - - // We use this map (below) so that any arbitrary error JSON can be handled. - // FIXME: This is in the absence of this Go generator honoring the non-2xx - // response (error) models, which needs to be implemented at some point. 
- var failurePayload map[string]interface{} - - httpResponse, err := _sling.Receive(successPayload, &failurePayload) - - if err == nil { - // err == nil only means that there wasn't a sub-application-layer error (e.g. no network error) - if failurePayload != nil { - // If the failurePayload is present, there likely was some kind of non-2xx status - // returned (and a JSON payload error present) - var str []byte - str, err = json.Marshal(failurePayload) - if err == nil { // For safety, check for an error marshalling... probably superfluous - // This will return the JSON error body as a string - err = errors.New(string(str)) - } - } else { - // So, there was no network-type error, and nothing in the failure payload, - // but we should still check the status code - if httpResponse == nil { - // This should never happen... - err = errors.New("No HTTP Response received.") - } else if code := httpResponse.StatusCode; 200 > code || code > 299 { - err = errors.New("HTTP Error: " + string(httpResponse.StatusCode)) - } - } - } - - return *successPayload, err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/ExamplepbSimpleMessage.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/ExamplepbSimpleMessage.go deleted file mode 100644 index ed9ada35b70f..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/ExamplepbSimpleMessage.go +++ /dev/null @@ -1,9 +0,0 @@ -package echo - -import ( -) - -type ExamplepbSimpleMessage struct { - Id string `json:"id,omitempty"` - -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_client.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_client.go new file mode 100644 index 000000000000..7a5171480269 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_client.go @@ -0,0 +1,164 @@ +/* + * Echo Service + * + * Echo Service API consists of a single service which returns a message. 
+ * + * OpenAPI spec version: version not set + * + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package echo + +import ( + "bytes" + "fmt" + "path/filepath" + "reflect" + "strings" + "net/url" + "io/ioutil" + "github.com/go-resty/resty" +) + +type APIClient struct { + config *Configuration +} + +func (c *APIClient) SelectHeaderContentType(contentTypes []string) string { + + if len(contentTypes) == 0 { + return "" + } + if contains(contentTypes, "application/json") { + return "application/json" + } + return contentTypes[0] // use the first content type specified in 'consumes' +} + +func (c *APIClient) SelectHeaderAccept(accepts []string) string { + + if len(accepts) == 0 { + return "" + } + if contains(accepts, "application/json") { + return "application/json" + } + return strings.Join(accepts, ",") +} + +func contains(haystack []string, needle string) bool { + for _, a := range haystack { + if strings.ToLower(a) == strings.ToLower(needle) { + return true + } + } + return false +} + +func (c *APIClient) CallAPI(path string, method string, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams map[string]string, + fileName string, + fileBytes []byte) (*resty.Response, error) { + + rClient := c.prepareClient() + request := c.prepareRequest(rClient, postBody, headerParams, queryParams, formParams, fileName, fileBytes) + + switch strings.ToUpper(method) { + case "GET": + response, err := request.Get(path) + return response, err + case "POST": + response, err := request.Post(path) + return response, err + case "PUT": + response, err := request.Put(path) + return response, err + case "PATCH": + response, err := request.Patch(path) + return response, err + case "DELETE": + response, err := request.Delete(path) + return response, err + } + + return nil, fmt.Errorf("invalid method %v", method) +} + +func (c *APIClient) ParameterToString(obj interface{}, collectionFormat string) string { + delimiter := "" + switch collectionFormat { + case "pipes": + delimiter = "|" + case "ssv": + delimiter = " " + case "tsv": + delimiter = "\t" + case "csv": + delimiter = "," + } + + if reflect.TypeOf(obj).Kind() == reflect.Slice { + return strings.Trim(strings.Replace(fmt.Sprint(obj), " ", delimiter, -1), "[]") + } + + return fmt.Sprintf("%v", obj) +} + +func (c *APIClient) prepareClient() *resty.Client { + + rClient := resty.New() + + rClient.SetDebug(c.config.Debug) + if c.config.Transport != nil { + rClient.SetTransport(c.config.Transport) + } + + if c.config.Timeout != nil { + rClient.SetTimeout(*c.config.Timeout) + } + rClient.SetLogger(ioutil.Discard) + return rClient +} + +func (c *APIClient) prepareRequest( + rClient *resty.Client, + postBody interface{}, + headerParams map[string]string, + queryParams url.Values, + formParams map[string]string, + fileName string, + fileBytes []byte) *resty.Request { + + + request := rClient.R() + request.SetBody(postBody) + + if c.config.UserAgent != "" { + request.SetHeader("User-Agent", c.config.UserAgent) + } + + // add header parameter, if any + if len(headerParams) > 0 { + request.SetHeaders(headerParams) + } + + // add query parameter, if any + if len(queryParams) > 0 { + request.SetMultiValueQueryParams(queryParams) + } + + // add form parameter, if any + if len(formParams) > 0 { + request.SetFormData(formParams) + } + + if len(fileBytes) > 0 && fileName != "" { + _, fileNm := filepath.Split(fileName) + request.SetFileReader("file", fileNm, bytes.NewReader(fileBytes)) + } + return request +} 
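The regenerated resty-based clients above all funnel through the same Configuration/APIClient plumbing: NewConfiguration wires an APIClient back onto the config, and every generated *ServiceApi method ultimately assembles headers, query, form and file arguments and hands them to CallAPI. As a reference only (not part of the patch), here is a minimal sketch of driving that plumbing directly against the abe Lookup route; the gateway address and UUID are illustrative placeholders, and a real caller would normally use the generated service methods instead:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe"
)

func main() {
	// NewConfiguration wires an APIClient back to the config; BasePath defaults to http://localhost.
	cfg := abe.NewConfiguration()
	cfg.BasePath = "http://localhost:8080" // illustrative gateway address

	// Issue a GET against the Lookup route through the shared CallAPI helper.
	resp, err := cfg.APIClient.CallAPI(
		cfg.BasePath+"/v1/example/a_bit_of_everything/0cf361e1-4b44-483d-a159-54dabdf7e814", // illustrative UUID
		"GET",
		nil,                 // no request body
		map[string]string{"Accept": "application/json"},
		url.Values{},        // no query parameters
		map[string]string{}, // no form parameters
		"", nil,             // no file upload
	)
	if err != nil {
		log.Fatal(err)
	}

	// Decode the JSON body into the generated model, as the service methods do.
	var payload abe.ExamplepbABitOfEverything
	if err := json.Unmarshal(resp.Body(), &payload); err != nil {
		log.Fatal(err)
	}
	fmt.Println(payload.Uuid)
}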
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_response.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_response.go new file mode 100644 index 000000000000..8b0d07c4a13c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/api_response.go @@ -0,0 +1,44 @@ +/* + * Echo Service + * + * Echo Service API consists of a single service which returns a message. + * + * OpenAPI spec version: version not set + * + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package echo + +import ( + "net/http" +) + +type APIResponse struct { + *http.Response `json:"-"` + Message string `json:"message,omitempty"` + // Operation is the name of the swagger operation. + Operation string `json:"operation,omitempty"` + // RequestURL is the request URL. This value is always available, even if the + // embedded *http.Response is nil. + RequestURL string `json:"url,omitempty"` + // Method is the HTTP method used for the request. This value is always + // available, even if the embedded *http.Response is nil. + Method string `json:"method,omitempty"` + // Payload holds the contents of the response body (which may be nil or empty). + // This is provided here as the raw response.Body() reader will have already + // been drained. + Payload []byte `json:"-"` +} + +func NewAPIResponse(r *http.Response) *APIResponse { + + response := &APIResponse{Response: r} + return response +} + +func NewAPIResponseWithError(errorMessage string) *APIResponse { + + response := &APIResponse{Message: errorMessage} + return response +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/configuration.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/configuration.go new file mode 100644 index 000000000000..9a75a30aeec0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/configuration.go @@ -0,0 +1,67 @@ +/* + * Echo Service + * + * Echo Service API consists of a single service which returns a message. 
+ * + * OpenAPI spec version: version not set + * + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package echo + +import ( + "encoding/base64" + "net/http" + "time" +) + + +type Configuration struct { + Username string `json:"userName,omitempty"` + Password string `json:"password,omitempty"` + APIKeyPrefix map[string]string `json:"APIKeyPrefix,omitempty"` + APIKey map[string]string `json:"APIKey,omitempty"` + Debug bool `json:"debug,omitempty"` + DebugFile string `json:"debugFile,omitempty"` + OAuthToken string `json:"oAuthToken,omitempty"` + BasePath string `json:"basePath,omitempty"` + Host string `json:"host,omitempty"` + Scheme string `json:"scheme,omitempty"` + AccessToken string `json:"accessToken,omitempty"` + DefaultHeader map[string]string `json:"defaultHeader,omitempty"` + UserAgent string `json:"userAgent,omitempty"` + APIClient *APIClient + Transport *http.Transport + Timeout *time.Duration `json:"timeout,omitempty"` +} + +func NewConfiguration() *Configuration { + cfg := &Configuration{ + BasePath: "http://localhost", + DefaultHeader: make(map[string]string), + APIKey: make(map[string]string), + APIKeyPrefix: make(map[string]string), + UserAgent: "Swagger-Codegen/1.0.0/go", + APIClient: &APIClient{}, + } + + cfg.APIClient.config = cfg + return cfg +} + +func (c *Configuration) GetBasicAuthEncodedString() string { + return base64.StdEncoding.EncodeToString([]byte(c.Username + ":" + c.Password)) +} + +func (c *Configuration) AddDefaultHeader(key string, value string) { + c.DefaultHeader[key] = value +} + +func (c *Configuration) GetAPIKeyWithPrefix(APIKeyIdentifier string) string { + if c.APIKeyPrefix[APIKeyIdentifier] != "" { + return c.APIKeyPrefix[APIKeyIdentifier] + " " + c.APIKey[APIKeyIdentifier] + } + + return c.APIKey[APIKeyIdentifier] +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/echo_service_api.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/echo_service_api.go new file mode 100644 index 000000000000..1f562327d3d6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/echo_service_api.go @@ -0,0 +1,224 @@ +/* + * Echo Service + * + * Echo Service API consists of a single service which returns a message. + * + * OpenAPI spec version: version not set + * + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package echo + +import ( + "net/url" + "strings" + "encoding/json" + "fmt" +) + +type EchoServiceApi struct { + Configuration *Configuration +} + +func NewEchoServiceApi() *EchoServiceApi { + configuration := NewConfiguration() + return &EchoServiceApi{ + Configuration: configuration, + } +} + +func NewEchoServiceApiWithBasePath(basePath string) *EchoServiceApi { + configuration := NewConfiguration() + configuration.BasePath = basePath + + return &EchoServiceApi{ + Configuration: configuration, + } +} + +/** + * Echo method receives a simple message and returns it. + * The message posted as the id parameter will also be returned. 
+ * + * @param id + * @return *ExamplepbSimpleMessage + */ +func (a EchoServiceApi) Echo(id string) (*ExamplepbSimpleMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/echo/{id}" + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", fmt.Sprintf("%v", id), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ExamplepbSimpleMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Echo", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * Echo method receives a simple message and returns it. + * The message posted as the id parameter will also be returned. 
+ * + * @param id + * @param num + * @return *ExamplepbSimpleMessage + */ +func (a EchoServiceApi) Echo2(id string, num string) (*ExamplepbSimpleMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Get") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/echo/{id}/{num}" + localVarPath = strings.Replace(localVarPath, "{"+"id"+"}", fmt.Sprintf("%v", id), -1) + localVarPath = strings.Replace(localVarPath, "{"+"num"+"}", fmt.Sprintf("%v", num), -1) + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + var successPayload = new(ExamplepbSimpleMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "Echo2", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + +/** + * EchoBody method receives a simple message and returns it. 
+ * + * @param body + * @return *ExamplepbSimpleMessage + */ +func (a EchoServiceApi) EchoBody(body ExamplepbSimpleMessage) (*ExamplepbSimpleMessage, *APIResponse, error) { + + var localVarHttpMethod = strings.ToUpper("Post") + // create path and map variables + localVarPath := a.Configuration.BasePath + "/v1/example/echo_body" + + localVarHeaderParams := make(map[string]string) + localVarQueryParams := url.Values{} + localVarFormParams := make(map[string]string) + var localVarPostBody interface{} + var localVarFileName string + var localVarFileBytes []byte + // add default headers if any + for key := range a.Configuration.DefaultHeader { + localVarHeaderParams[key] = a.Configuration.DefaultHeader[key] + } + + // to determine the Content-Type header + localVarHttpContentTypes := []string{ "application/json", } + + // set Content-Type header + localVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes) + if localVarHttpContentType != "" { + localVarHeaderParams["Content-Type"] = localVarHttpContentType + } + // to determine the Accept header + localVarHttpHeaderAccepts := []string{ + "application/json", + } + + // set Accept header + localVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts) + if localVarHttpHeaderAccept != "" { + localVarHeaderParams["Accept"] = localVarHttpHeaderAccept + } + // body params + localVarPostBody = &body + var successPayload = new(ExamplepbSimpleMessage) + localVarHttpResponse, err := a.Configuration.APIClient.CallAPI(localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes) + + var localVarURL, _ = url.Parse(localVarPath) + localVarURL.RawQuery = localVarQueryParams.Encode() + var localVarAPIResponse = &APIResponse{Operation: "EchoBody", Method: localVarHttpMethod, RequestURL: localVarURL.String()} + if localVarHttpResponse != nil { + localVarAPIResponse.Response = localVarHttpResponse.RawResponse + localVarAPIResponse.Payload = localVarHttpResponse.Body() + } + + if err != nil { + return successPayload, localVarAPIResponse, err + } + err = json.Unmarshal(localVarHttpResponse.Body(), &successPayload) + return successPayload, localVarAPIResponse, err +} + diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/examplepb_simple_message.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/examplepb_simple_message.go new file mode 100644 index 000000000000..98eb83250838 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo/examplepb_simple_message.go @@ -0,0 +1,20 @@ +/* + * Echo Service + * + * Echo Service API consists of a single service which returns a message. + * + * OpenAPI spec version: version not set + * + * Generated by: https://github.com/swagger-api/swagger-codegen.git + */ + +package echo + +// SimpleMessage represents a simple message sent to the Echo service. +type ExamplepbSimpleMessage struct { + + // Id represents the message identifier. 
+ Id string `json:"id,omitempty"` + + Num string `json:"num,omitempty"` +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.go index 4a3548496ee8..2727fba7b071 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.go @@ -1,17 +1,18 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/examplepb/a_bit_of_everything.proto -// DO NOT EDIT! package examplepb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" +import _ "google.golang.org/genproto/googleapis/api/annotations" import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" import grpc_gateway_examples_sub "github.com/grpc-ecosystem/grpc-gateway/examples/sub" import sub2 "github.com/grpc-ecosystem/grpc-gateway/examples/sub2" -import google_protobuf2 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" import ( context "golang.org/x/net/context" @@ -104,7 +105,9 @@ type ABitOfEverything struct { MappedStringValue map[string]string `protobuf:"bytes,23,rep,name=mapped_string_value,json=mappedStringValue" json:"mapped_string_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` MappedNestedValue map[string]*ABitOfEverything_Nested `protobuf:"bytes,24,rep,name=mapped_nested_value,json=mappedNestedValue" json:"mapped_nested_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` NonConventionalNameValue string `protobuf:"bytes,26,opt,name=nonConventionalNameValue" json:"nonConventionalNameValue,omitempty"` - TimestampValue *google_protobuf2.Timestamp `protobuf:"bytes,27,opt,name=timestamp_value,json=timestampValue" json:"timestamp_value,omitempty"` + TimestampValue *google_protobuf3.Timestamp `protobuf:"bytes,27,opt,name=timestamp_value,json=timestampValue" json:"timestamp_value,omitempty"` + // repeated enum value. 
it is comma-separated in query + RepeatedEnumValue []NumericEnum `protobuf:"varint,28,rep,packed,name=repeated_enum_value,json=repeatedEnumValue,enum=grpc.gateway.examples.examplepb.NumericEnum" json:"repeated_enum_value,omitempty"` } func (m *ABitOfEverything) Reset() { *m = ABitOfEverything{} } @@ -140,6 +143,13 @@ func (m *ABitOfEverything) GetSingleNested() *ABitOfEverything_Nested { return nil } +func (m *ABitOfEverything) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + func (m *ABitOfEverything) GetNested() []*ABitOfEverything_Nested { if m != nil { return m.Nested @@ -147,6 +157,118 @@ func (m *ABitOfEverything) GetNested() []*ABitOfEverything_Nested { return nil } +func (m *ABitOfEverything) GetFloatValue() float32 { + if m != nil { + return m.FloatValue + } + return 0 +} + +func (m *ABitOfEverything) GetDoubleValue() float64 { + if m != nil { + return m.DoubleValue + } + return 0 +} + +func (m *ABitOfEverything) GetInt64Value() int64 { + if m != nil { + return m.Int64Value + } + return 0 +} + +func (m *ABitOfEverything) GetUint64Value() uint64 { + if m != nil { + return m.Uint64Value + } + return 0 +} + +func (m *ABitOfEverything) GetInt32Value() int32 { + if m != nil { + return m.Int32Value + } + return 0 +} + +func (m *ABitOfEverything) GetFixed64Value() uint64 { + if m != nil { + return m.Fixed64Value + } + return 0 +} + +func (m *ABitOfEverything) GetFixed32Value() uint32 { + if m != nil { + return m.Fixed32Value + } + return 0 +} + +func (m *ABitOfEverything) GetBoolValue() bool { + if m != nil { + return m.BoolValue + } + return false +} + +func (m *ABitOfEverything) GetStringValue() string { + if m != nil { + return m.StringValue + } + return "" +} + +func (m *ABitOfEverything) GetUint32Value() uint32 { + if m != nil { + return m.Uint32Value + } + return 0 +} + +func (m *ABitOfEverything) GetEnumValue() NumericEnum { + if m != nil { + return m.EnumValue + } + return NumericEnum_ZERO +} + +func (m *ABitOfEverything) GetSfixed32Value() int32 { + if m != nil { + return m.Sfixed32Value + } + return 0 +} + +func (m *ABitOfEverything) GetSfixed64Value() int64 { + if m != nil { + return m.Sfixed64Value + } + return 0 +} + +func (m *ABitOfEverything) GetSint32Value() int32 { + if m != nil { + return m.Sint32Value + } + return 0 +} + +func (m *ABitOfEverything) GetSint64Value() int64 { + if m != nil { + return m.Sint64Value + } + return 0 +} + +func (m *ABitOfEverything) GetRepeatedStringValue() []string { + if m != nil { + return m.RepeatedStringValue + } + return nil +} + func (m *ABitOfEverything) GetOneofEmpty() *google_protobuf1.Empty { if x, ok := m.GetOneofValue().(*ABitOfEverything_OneofEmpty); ok { return x.OneofEmpty @@ -182,13 +304,27 @@ func (m *ABitOfEverything) GetMappedNestedValue() map[string]*ABitOfEverything_N return nil } -func (m *ABitOfEverything) GetTimestampValue() *google_protobuf2.Timestamp { +func (m *ABitOfEverything) GetNonConventionalNameValue() string { + if m != nil { + return m.NonConventionalNameValue + } + return "" +} + +func (m *ABitOfEverything) GetTimestampValue() *google_protobuf3.Timestamp { if m != nil { return m.TimestampValue } return nil } +func (m *ABitOfEverything) GetRepeatedEnumValue() []NumericEnum { + if m != nil { + return m.RepeatedEnumValue + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*ABitOfEverything) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _ABitOfEverything_OneofMarshaler, _ABitOfEverything_OneofUnmarshaler, _ABitOfEverything_OneofSizer, []interface{}{ @@ -272,6 +408,27 @@ func (m *ABitOfEverything_Nested) String() string { return proto.Comp func (*ABitOfEverything_Nested) ProtoMessage() {} func (*ABitOfEverything_Nested) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } +func (m *ABitOfEverything_Nested) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ABitOfEverything_Nested) GetAmount() uint32 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *ABitOfEverything_Nested) GetOk() ABitOfEverything_Nested_DeepEnum { + if m != nil { + return m.Ok + } + return ABitOfEverything_Nested_FALSE +} + func init() { proto.RegisterType((*ABitOfEverything)(nil), "grpc.gateway.examples.examplepb.ABitOfEverything") proto.RegisterType((*ABitOfEverything_Nested)(nil), "grpc.gateway.examples.examplepb.ABitOfEverything.Nested") @@ -295,8 +452,17 @@ type ABitOfEverythingServiceClient interface { Lookup(ctx context.Context, in *sub2.IdMessage, opts ...grpc.CallOption) (*ABitOfEverything, error) Update(ctx context.Context, in *ABitOfEverything, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) Delete(ctx context.Context, in *sub2.IdMessage, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + GetQuery(ctx context.Context, in *ABitOfEverything, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) + // Echo allows posting a StringMessage value. + // + // It also exposes multiple bindings. + // + // This makes it useful when validating that the OpenAPI v2 API + // description exposes documentation correctly on all paths + // defined as additional_bindings in the proto. Echo(ctx context.Context, in *grpc_gateway_examples_sub.StringMessage, opts ...grpc.CallOption) (*grpc_gateway_examples_sub.StringMessage, error) DeepPathEcho(ctx context.Context, in *ABitOfEverything, opts ...grpc.CallOption) (*ABitOfEverything, error) + NoBindings(ctx context.Context, in *google_protobuf2.Duration, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) Timeout(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) } @@ -353,6 +519,15 @@ func (c *aBitOfEverythingServiceClient) Delete(ctx context.Context, in *sub2.IdM return out, nil } +func (c *aBitOfEverythingServiceClient) GetQuery(ctx context.Context, in *ABitOfEverything, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/grpc.gateway.examples.examplepb.ABitOfEverythingService/GetQuery", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *aBitOfEverythingServiceClient) Echo(ctx context.Context, in *grpc_gateway_examples_sub.StringMessage, opts ...grpc.CallOption) (*grpc_gateway_examples_sub.StringMessage, error) { out := new(grpc_gateway_examples_sub.StringMessage) err := grpc.Invoke(ctx, "/grpc.gateway.examples.examplepb.ABitOfEverythingService/Echo", in, out, c.cc, opts...) 
@@ -371,6 +546,15 @@ func (c *aBitOfEverythingServiceClient) DeepPathEcho(ctx context.Context, in *AB return out, nil } +func (c *aBitOfEverythingServiceClient) NoBindings(ctx context.Context, in *google_protobuf2.Duration, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/grpc.gateway.examples.examplepb.ABitOfEverythingService/NoBindings", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *aBitOfEverythingServiceClient) Timeout(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { out := new(google_protobuf1.Empty) err := grpc.Invoke(ctx, "/grpc.gateway.examples.examplepb.ABitOfEverythingService/Timeout", in, out, c.cc, opts...) @@ -388,8 +572,17 @@ type ABitOfEverythingServiceServer interface { Lookup(context.Context, *sub2.IdMessage) (*ABitOfEverything, error) Update(context.Context, *ABitOfEverything) (*google_protobuf1.Empty, error) Delete(context.Context, *sub2.IdMessage) (*google_protobuf1.Empty, error) + GetQuery(context.Context, *ABitOfEverything) (*google_protobuf1.Empty, error) + // Echo allows posting a StringMessage value. + // + // It also exposes multiple bindings. + // + // This makes it useful when validating that the OpenAPI v2 API + // description exposes documentation correctly on all paths + // defined as additional_bindings in the proto. Echo(context.Context, *grpc_gateway_examples_sub.StringMessage) (*grpc_gateway_examples_sub.StringMessage, error) DeepPathEcho(context.Context, *ABitOfEverything) (*ABitOfEverything, error) + NoBindings(context.Context, *google_protobuf2.Duration) (*google_protobuf1.Empty, error) Timeout(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error) } @@ -487,6 +680,24 @@ func _ABitOfEverythingService_Delete_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _ABitOfEverythingService_GetQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ABitOfEverything) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABitOfEverythingServiceServer).GetQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.gateway.examples.examplepb.ABitOfEverythingService/GetQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABitOfEverythingServiceServer).GetQuery(ctx, req.(*ABitOfEverything)) + } + return interceptor(ctx, in, info, handler) +} + func _ABitOfEverythingService_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(grpc_gateway_examples_sub.StringMessage) if err := dec(in); err != nil { @@ -523,6 +734,24 @@ func _ABitOfEverythingService_DeepPathEcho_Handler(srv interface{}, ctx context. 
return interceptor(ctx, in, info, handler) } +func _ABitOfEverythingService_NoBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf2.Duration) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABitOfEverythingServiceServer).NoBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.gateway.examples.examplepb.ABitOfEverythingService/NoBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABitOfEverythingServiceServer).NoBindings(ctx, req.(*google_protobuf2.Duration)) + } + return interceptor(ctx, in, info, handler) +} + func _ABitOfEverythingService_Timeout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { @@ -565,6 +794,10 @@ var _ABitOfEverythingService_serviceDesc = grpc.ServiceDesc{ MethodName: "Delete", Handler: _ABitOfEverythingService_Delete_Handler, }, + { + MethodName: "GetQuery", + Handler: _ABitOfEverythingService_GetQuery_Handler, + }, { MethodName: "Echo", Handler: _ABitOfEverythingService_Echo_Handler, @@ -573,6 +806,10 @@ var _ABitOfEverythingService_serviceDesc = grpc.ServiceDesc{ MethodName: "DeepPathEcho", Handler: _ABitOfEverythingService_DeepPathEcho_Handler, }, + { + MethodName: "NoBindings", + Handler: _ABitOfEverythingService_NoBindings_Handler, + }, { MethodName: "Timeout", Handler: _ABitOfEverythingService_Timeout_Handler, @@ -582,83 +819,172 @@ var _ABitOfEverythingService_serviceDesc = grpc.ServiceDesc{ Metadata: "examples/examplepb/a_bit_of_everything.proto", } +// Client API for AnotherServiceWithNoBindings service + +type AnotherServiceWithNoBindingsClient interface { + NoBindings(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) +} + +type anotherServiceWithNoBindingsClient struct { + cc *grpc.ClientConn +} + +func NewAnotherServiceWithNoBindingsClient(cc *grpc.ClientConn) AnotherServiceWithNoBindingsClient { + return &anotherServiceWithNoBindingsClient{cc} +} + +func (c *anotherServiceWithNoBindingsClient) NoBindings(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { + out := new(google_protobuf1.Empty) + err := grpc.Invoke(ctx, "/grpc.gateway.examples.examplepb.AnotherServiceWithNoBindings/NoBindings", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for AnotherServiceWithNoBindings service + +type AnotherServiceWithNoBindingsServer interface { + NoBindings(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error) +} + +func RegisterAnotherServiceWithNoBindingsServer(s *grpc.Server, srv AnotherServiceWithNoBindingsServer) { + s.RegisterService(&_AnotherServiceWithNoBindings_serviceDesc, srv) +} + +func _AnotherServiceWithNoBindings_NoBindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf1.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnotherServiceWithNoBindingsServer).NoBindings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.gateway.examples.examplepb.AnotherServiceWithNoBindings/NoBindings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnotherServiceWithNoBindingsServer).NoBindings(ctx, req.(*google_protobuf1.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _AnotherServiceWithNoBindings_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gateway.examples.examplepb.AnotherServiceWithNoBindings", + HandlerType: (*AnotherServiceWithNoBindingsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NoBindings", + Handler: _AnotherServiceWithNoBindings_NoBindings_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "examples/examplepb/a_bit_of_everything.proto", +} + func init() { proto.RegisterFile("examples/examplepb/a_bit_of_everything.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 1198 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0xdb, 0x46, - 0x17, 0xf6, 0x48, 0xb6, 0x6c, 0x1d, 0x5a, 0xb2, 0x3c, 0x8e, 0x1d, 0x45, 0xfe, 0x7f, 0x88, 0x55, - 0xda, 0x82, 0x70, 0x0d, 0x12, 0x96, 0x83, 0x22, 0x31, 0x50, 0x04, 0xbe, 0xa8, 0x70, 0xd1, 0xc4, - 0x4e, 0xe9, 0x24, 0x0b, 0xa3, 0x81, 0x40, 0x49, 0x23, 0x99, 0xb0, 0xc8, 0x21, 0xc8, 0xa1, 0x6a, - 0x41, 0x55, 0x17, 0x5d, 0xf4, 0x05, 0xba, 0xe8, 0x2e, 0x9b, 0x02, 0x45, 0x37, 0x5d, 0x76, 0xdd, - 0x87, 0xe8, 0x1b, 0x14, 0x7d, 0x90, 0x82, 0x33, 0x24, 0x4d, 0xc9, 0x16, 0xe4, 0x0b, 0x90, 0x9d, - 0x66, 0xe6, 0x3b, 0xdf, 0x77, 0x2e, 0x73, 0xce, 0x50, 0xb0, 0x49, 0x2e, 0x0c, 0xcb, 0xe9, 0x12, - 0x4f, 0x0b, 0x7f, 0x38, 0x0d, 0xcd, 0xa8, 0x37, 0x4c, 0x56, 0xa7, 0xed, 0x3a, 0xe9, 0x11, 0xb7, - 0xcf, 0xce, 0x4c, 0xbb, 0xa3, 0x3a, 0x2e, 0x65, 0x14, 0x97, 0x3b, 0xae, 0xd3, 0x54, 0x3b, 0x06, - 0x23, 0xdf, 0x19, 0x7d, 0x35, 0x32, 0x55, 0x63, 0xd3, 0xd2, 0xff, 0x3a, 0x94, 0x76, 0xba, 0x44, - 0x33, 0x1c, 0x53, 0x33, 0x6c, 0x9b, 0x32, 0x83, 0x99, 0xd4, 0xf6, 0x84, 0x79, 0x69, 0x3d, 0x3c, - 0xe5, 0xab, 0x86, 0xdf, 0xd6, 0x88, 0xe5, 0xb0, 0x7e, 0x78, 0x58, 0x8a, 0x3d, 0xf1, 0xfc, 0x86, - 0x66, 0x11, 0xcf, 0x33, 0x3a, 0x24, 0x32, 0x4c, 0x9e, 0x55, 0xc7, 0x0e, 0xcb, 0xe3, 0xac, 0xcc, - 0xb4, 0x88, 0xc7, 0x0c, 0xcb, 0x11, 0x80, 0xca, 0x3f, 0x79, 0x28, 0xec, 0xee, 0x99, 0xec, 0xb8, - 0x5d, 0x8b, 0x03, 0xc2, 0xef, 0x20, 0xe7, 0x99, 0x76, 0xa7, 0x4b, 0xea, 0x36, 0xf1, 0x18, 0x69, - 0x15, 0x1f, 0xc9, 0x48, 0x91, 0xaa, 0x4f, 0xd5, 0x29, 0x21, 0xaa, 0xe3, 0x4c, 0xea, 0x11, 0xb7, - 0xd7, 0x17, 0x05, 0x9d, 0x58, 0x61, 0x0c, 0xb3, 0xbe, 0x6f, 0xb6, 0x8a, 0x48, 0x46, 0x4a, 0x56, - 0xe7, 0xbf, 0xf1, 0x2b, 0xc8, 0x84, 0x5a, 0x29, 0x39, 0x7d, 0x2f, 0xad, 
0x90, 0x07, 0x97, 0x41, - 0x6a, 0x77, 0xa9, 0xc1, 0xea, 0x3d, 0xa3, 0xeb, 0x93, 0x62, 0x5a, 0x46, 0x4a, 0x4a, 0x07, 0xbe, - 0xf5, 0x36, 0xd8, 0xc1, 0x1f, 0xc1, 0x62, 0x8b, 0xfa, 0x8d, 0x2e, 0x09, 0x11, 0xb3, 0x32, 0x52, - 0x90, 0x2e, 0x89, 0x3d, 0x01, 0x29, 0x83, 0x64, 0xda, 0xec, 0xf3, 0x27, 0x21, 0x62, 0x4e, 0x46, - 0x4a, 0x5a, 0x07, 0xbe, 0x15, 0x73, 0xf8, 0x49, 0x44, 0x46, 0x46, 0xca, 0xac, 0x2e, 0xf9, 0x09, - 0x88, 0xe0, 0xd8, 0xae, 0x86, 0x88, 0x79, 0x19, 0x29, 0x73, 0x9c, 0x63, 0xbb, 0x2a, 0x00, 0x8f, - 0x21, 0xd7, 0x36, 0x2f, 0x48, 0x2b, 0x26, 0x59, 0x90, 0x91, 0x92, 0xd1, 0x17, 0xc3, 0xcd, 0x51, - 0x50, 0xcc, 0x93, 0x95, 0x91, 0x32, 0x1f, 0x82, 0x22, 0xa6, 0xff, 0x03, 0x34, 0x28, 0xed, 0x86, - 0x08, 0x90, 0x91, 0xb2, 0xa0, 0x67, 0x83, 0x9d, 0xd8, 0x59, 0x8f, 0xb9, 0xa6, 0xdd, 0x09, 0x01, - 0x12, 0xcf, 0xbf, 0x24, 0xf6, 0x46, 0xe2, 0x89, 0x55, 0x72, 0x32, 0x52, 0x72, 0x22, 0x9e, 0x48, - 0xe4, 0x6b, 0x00, 0x62, 0xfb, 0x56, 0x08, 0xc8, 0xcb, 0x48, 0xc9, 0x57, 0x37, 0xa7, 0x56, 0xeb, - 0xc8, 0xb7, 0x88, 0x6b, 0x36, 0x6b, 0xb6, 0x6f, 0xe9, 0xd9, 0xc0, 0x5e, 0x90, 0x7d, 0x02, 0x79, - 0x6f, 0x34, 0xae, 0x25, 0x19, 0x29, 0x4b, 0x7a, 0xce, 0x1b, 0x09, 0x2c, 0x86, 0xc5, 0x39, 0x2a, - 0xc8, 0x48, 0x29, 0x44, 0xb0, 0x44, 0x35, 0xbc, 0xa4, 0xf7, 0xcb, 0x32, 0x52, 0x96, 0x75, 0xc9, - 0x4b, 0x78, 0x1f, 0x42, 0x62, 0x1e, 0x2c, 0x23, 0x05, 0x0b, 0x48, 0xc4, 0x52, 0x85, 0x55, 0x97, - 0x38, 0xc4, 0x60, 0xa4, 0x55, 0x1f, 0xc9, 0xd7, 0x8a, 0x9c, 0x56, 0xb2, 0xfa, 0x4a, 0x74, 0x78, - 0x92, 0xc8, 0xdb, 0x33, 0x90, 0xa8, 0x4d, 0x82, 0xb1, 0x10, 0x74, 0x6d, 0xf1, 0x01, 0xef, 0x97, - 0x35, 0x55, 0x74, 0x9f, 0x1a, 0x75, 0x9f, 0x5a, 0x0b, 0x4e, 0x0f, 0x67, 0x74, 0xe0, 0x60, 0xbe, - 0xc2, 0x8f, 0x61, 0x51, 0x98, 0x0a, 0xad, 0xe2, 0x6a, 0x50, 0x95, 0xc3, 0x19, 0x5d, 0x10, 0x0a, - 0x11, 0xfc, 0x2d, 0x64, 0x2d, 0xc3, 0x09, 0xfd, 0x58, 0xe3, 0x1d, 0xf2, 0xfc, 0xf6, 0x1d, 0xf2, - 0xd2, 0x70, 0xb8, 0xbb, 0x35, 0x9b, 0xb9, 0x7d, 0x7d, 0xc1, 0x0a, 0x97, 0xf8, 0x02, 0x56, 0x2c, - 0xc3, 0x71, 0xc6, 0xe3, 0x7d, 0xc8, 0x75, 0x0e, 0xef, 0xa4, 0xe3, 0x8c, 0xe4, 0x47, 0x08, 0x2e, - 0x5b, 0xe3, 0xfb, 0x09, 0x65, 0xd1, 0xb5, 0xa1, 0x72, 0xf1, 0x7e, 0xca, 0x62, 0x12, 0x5c, 0x55, - 0x4e, 0xec, 0xe3, 0x1d, 0x28, 0xda, 0xd4, 0xde, 0xa7, 0x76, 0x8f, 0xd8, 0xc1, 0x1c, 0x36, 0xba, - 0x47, 0x86, 0x25, 0xda, 0xbe, 0x58, 0xe2, 0x8d, 0x31, 0xf1, 0x1c, 0xef, 0xc3, 0x52, 0x3c, 0x47, - 0x43, 0x8f, 0xd7, 0x79, 0xc5, 0x4b, 0x57, 0x2a, 0xfe, 0x3a, 0xc2, 0xe9, 0xf9, 0xd8, 0x84, 0x93, - 0x94, 0x7e, 0x47, 0x90, 0xb9, 0x1c, 0x88, 0xb6, 0x61, 0x91, 0x68, 0x20, 0x06, 0xbf, 0xf1, 0x1a, - 0x64, 0x0c, 0x8b, 0xfa, 0x36, 0x2b, 0xa6, 0x78, 0x0f, 0x86, 0x2b, 0xfc, 0x0d, 0xa4, 0xe8, 0x39, - 0x9f, 0x66, 0xf9, 0xea, 0xee, 0x5d, 0x87, 0xa4, 0x7a, 0x40, 0x88, 0xc3, 0x7b, 0x31, 0x45, 0xcf, - 0x2b, 0x65, 0x58, 0x88, 0xd6, 0x38, 0x0b, 0x73, 0x5f, 0xee, 0xbe, 0x38, 0xa9, 0x15, 0x66, 0xf0, - 0x02, 0xcc, 0xbe, 0xd6, 0xdf, 0xd4, 0x0a, 0xa8, 0x64, 0x42, 0x6e, 0xe4, 0xea, 0xe0, 0x02, 0xa4, - 0xcf, 0x49, 0x3f, 0xf4, 0x37, 0xf8, 0x89, 0xf7, 0x60, 0x4e, 0x24, 0x22, 0x75, 0x87, 0x81, 0x20, - 0x4c, 0x77, 0x52, 0x4f, 0x51, 0xe9, 0x00, 0xd6, 0xae, 0xbf, 0x3d, 0xd7, 0x68, 0x3e, 0x48, 0x6a, - 0x66, 0x93, 0x2c, 0x3f, 0x44, 0x2c, 0xe3, 0x37, 0xe1, 0x1a, 0x96, 0xa3, 0x24, 0xcb, 0x7d, 0x1e, - 0x9e, 0x4b, 0xfd, 0xbd, 0x5c, 0x34, 0x0e, 0xf8, 0xd6, 0x86, 0x0c, 0x52, 0x22, 0xdc, 0x20, 0xb1, - 0xa7, 0x35, 0xfd, 0xb8, 0x30, 0x83, 0xe7, 0x21, 0x7d, 0x7c, 0x54, 0x2b, 0xa0, 0xea, 0x2f, 0x12, - 0x3c, 0x1c, 0xe7, 0x3d, 0x21, 0x6e, 0xcf, 0x6c, 0x12, 0xfc, 0x3e, 0x0d, 0x99, 0x7d, 0x37, 0x18, - 
0x39, 0x78, 0xeb, 0xd6, 0xce, 0x95, 0x6e, 0x6f, 0x52, 0xf9, 0x23, 0xf5, 0xe3, 0xdf, 0xff, 0xfe, - 0x9c, 0xfa, 0x2d, 0x55, 0xf9, 0x35, 0xa5, 0xf5, 0xb6, 0xa2, 0xaf, 0x9f, 0xeb, 0xbe, 0x7d, 0xb4, - 0x41, 0xe2, 0x8d, 0x1d, 0x6a, 0x83, 0xe4, 0x83, 0x3a, 0xd4, 0x06, 0x89, 0x49, 0x3b, 0xd4, 0x3c, - 0xe2, 0x18, 0xae, 0xc1, 0xa8, 0xab, 0x0d, 0xfc, 0x91, 0x83, 0x41, 0x62, 0x66, 0x0f, 0xb5, 0xc1, - 0xc8, 0xa0, 0x8f, 0xd6, 0x89, 0xf3, 0xcb, 0x27, 0x6e, 0xa8, 0x0d, 0x92, 0x03, 0xeb, 0x0b, 0x8f, - 0xb9, 0x8e, 0x4b, 0xda, 0xe6, 0x85, 0xb6, 0x31, 0x14, 0x22, 0x09, 0x33, 0x6f, 0x9c, 0xc7, 0x1b, - 0x17, 0xf2, 0xc6, 0x0c, 0x46, 0x9d, 0x9c, 0x34, 0x0d, 0x86, 0xf8, 0x3d, 0x02, 0x10, 0x05, 0xda, - 0xa3, 0xad, 0xfe, 0x07, 0x2a, 0xd2, 0x06, 0xaf, 0xd1, 0xc7, 0x95, 0xf2, 0x94, 0x0a, 0xed, 0xa0, - 0x0d, 0xfc, 0x3d, 0x64, 0x5e, 0x50, 0x7a, 0xee, 0x3b, 0x78, 0x49, 0x0d, 0x3e, 0x12, 0xd5, 0xaf, - 0x5a, 0x2f, 0xc5, 0x67, 0xe2, 0x5d, 0x94, 0x55, 0xae, 0xac, 0xe0, 0x4f, 0xa7, 0xde, 0x8d, 0xe0, - 0xcb, 0x6e, 0x88, 0x7f, 0x42, 0x90, 0x79, 0xe3, 0xb4, 0xee, 0x78, 0x7f, 0x27, 0x3c, 0xa2, 0x95, - 0x2d, 0xee, 0xc5, 0x67, 0xa5, 0x1b, 0x7a, 0x11, 0xa4, 0xc1, 0x80, 0xcc, 0x01, 0xe9, 0x12, 0x46, - 0xae, 0xa6, 0x61, 0x92, 0x4a, 0x18, 0xeb, 0xc6, 0x4d, 0x63, 0xfd, 0x0b, 0xc1, 0x6c, 0xad, 0x79, - 0x46, 0xb1, 0x32, 0x21, 0x52, 0xcf, 0x6f, 0xa8, 0x62, 0xb4, 0x45, 0xd2, 0x37, 0x46, 0x56, 0x9a, - 0xdc, 0x99, 0x77, 0x78, 0x73, 0x9a, 0x33, 0xa4, 0x79, 0x46, 0xb5, 0x81, 0xb8, 0xb8, 0xa7, 0x8f, - 0x2a, 0x05, 0xad, 0x57, 0x8d, 0xf1, 0xc1, 0xd9, 0x8e, 0x18, 0x55, 0xa7, 0x18, 0x5f, 0x39, 0xc2, - 0x7f, 0x22, 0x58, 0x0c, 0x5e, 0x83, 0x57, 0x06, 0x3b, 0xe3, 0x91, 0x7c, 0x98, 0xeb, 0xfc, 0x9c, - 0xc7, 0xf6, 0xac, 0xf2, 0x64, 0x6a, 0xa2, 0x47, 0xfe, 0x99, 0xa8, 0xc1, 0x5b, 0xc9, 0x8b, 0xfb, - 0x16, 0xe6, 0x83, 0xb7, 0x96, 0xfa, 0x0c, 0x4f, 0x28, 0xe6, 0xc4, 0x22, 0xaf, 0x73, 0xed, 0x55, - 0xbc, 0x92, 0x4c, 0x06, 0x13, 0x64, 0x7b, 0xd2, 0x69, 0x36, 0x76, 0xbb, 0x91, 0xe1, 0x96, 0xdb, - 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xdb, 0xf3, 0x9f, 0xf3, 0x1a, 0x0e, 0x00, 0x00, + // 1590 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0xd8, 0x89, 0x13, 0x8f, 0xf3, 0xc3, 0x99, 0xb4, 0xa9, 0xeb, 0xe6, 0xfb, 0xcd, 0xd4, + 0x2d, 0x68, 0x15, 0xe2, 0x5d, 0xe2, 0x56, 0xa8, 0xb5, 0x04, 0xc5, 0x49, 0xdc, 0x1f, 0xa2, 0x4d, + 0xdb, 0xed, 0x0f, 0xaa, 0xd0, 0x12, 0xad, 0xed, 0xb1, 0xbd, 0x8d, 0x77, 0x67, 0xd9, 0x9d, 0x4d, + 0x63, 0x19, 0x73, 0xe0, 0x80, 0xd4, 0x6b, 0xb8, 0x21, 0xd1, 0x0b, 0x12, 0x02, 0x09, 0x8e, 0x9c, + 0x38, 0x70, 0xe7, 0xca, 0x5f, 0x80, 0xc4, 0x1f, 0x82, 0x76, 0x66, 0x77, 0xb3, 0xb6, 0x63, 0xa5, + 0x4e, 0x51, 0x4f, 0xde, 0x99, 0xf9, 0xbc, 0xcf, 0xe7, 0xbd, 0x37, 0xf3, 0xde, 0x8c, 0xe1, 0x2a, + 0xd9, 0xd7, 0x0c, 0xab, 0x45, 0x1c, 0xc5, 0xff, 0xb0, 0x2a, 0x8a, 0xb6, 0x53, 0xd1, 0xd9, 0x0e, + 0xad, 0xef, 0x90, 0x3d, 0x62, 0xb7, 0x59, 0x53, 0x37, 0x1b, 0xb2, 0x65, 0x53, 0x46, 0xd1, 0x72, + 0xc3, 0xb6, 0xaa, 0x72, 0x43, 0x63, 0xe4, 0x85, 0xd6, 0x96, 0x03, 0x53, 0x39, 0x34, 0xcd, 0x2e, + 0x35, 0x28, 0x6d, 0xb4, 0x88, 0xa2, 0x59, 0xba, 0xa2, 0x99, 0x26, 0x65, 0x1a, 0xd3, 0xa9, 0xe9, + 0x08, 0xf3, 0xec, 0x39, 0x7f, 0x95, 0x8f, 0x2a, 0x6e, 0x5d, 0x21, 0x86, 0xc5, 0xda, 0xfe, 0xe2, + 0xff, 0xfb, 0x17, 0x6b, 0xae, 0xcd, 0xad, 0xfd, 0xf5, 0x6c, 0xe8, 0xa9, 0xe3, 0x56, 0x14, 0x83, + 0x38, 0x8e, 0xd6, 0x20, 0x01, 0x71, 0x74, 0xad, 0xd0, 0xb7, 0xb8, 0xdc, 0x4f, 0xcc, 0x74, 0x83, + 0x38, 0x4c, 0x33, 0x2c, 0x1f, 0xb0, 0xca, 0x7f, 0xaa, 0xf9, 0x06, 0x31, 0xf3, 0xce, 
0x0b, 0xad, + 0xd1, 0x20, 0xb6, 0x42, 0x2d, 0xee, 0xf8, 0x60, 0x10, 0xb9, 0xef, 0xd3, 0x30, 0x5d, 0x5a, 0xd7, + 0xd9, 0xdd, 0x7a, 0x39, 0x4c, 0x0f, 0x7a, 0x06, 0x67, 0x1c, 0xdd, 0x6c, 0xb4, 0xc8, 0x8e, 0x49, + 0x1c, 0x46, 0x6a, 0x99, 0xb3, 0x18, 0x48, 0xa9, 0xc2, 0x15, 0xf9, 0x98, 0x84, 0xc9, 0xfd, 0x4c, + 0xf2, 0x16, 0xb7, 0x57, 0xa7, 0x05, 0x9d, 0x18, 0x21, 0x04, 0xc7, 0x5d, 0x57, 0xaf, 0x65, 0x00, + 0x06, 0x52, 0x52, 0xe5, 0xdf, 0xe8, 0x1e, 0x4c, 0xf8, 0x5a, 0x31, 0x1c, 0x7f, 0x23, 0x2d, 0x9f, + 0x07, 0x2d, 0xc3, 0x54, 0xbd, 0x45, 0x35, 0xb6, 0xb3, 0xa7, 0xb5, 0x5c, 0x92, 0x89, 0x63, 0x20, + 0xc5, 0x54, 0xc8, 0xa7, 0x1e, 0x7b, 0x33, 0xe8, 0x3c, 0x9c, 0xae, 0x51, 0xb7, 0xd2, 0x22, 0x3e, + 0x62, 0x1c, 0x03, 0x09, 0xa8, 0x29, 0x31, 0x27, 0x20, 0xcb, 0x30, 0xa5, 0x9b, 0xec, 0x83, 0xcb, + 0x3e, 0x62, 0x02, 0x03, 0x29, 0xae, 0x42, 0x3e, 0x15, 0x72, 0xb8, 0x51, 0x44, 0x02, 0x03, 0x69, + 0x5c, 0x4d, 0xb9, 0x11, 0x88, 0xe0, 0xb8, 0x54, 0xf0, 0x11, 0x93, 0x18, 0x48, 0x13, 0x9c, 0xe3, + 0x52, 0x41, 0x00, 0x2e, 0xc0, 0x99, 0xba, 0xbe, 0x4f, 0x6a, 0x21, 0xc9, 0x14, 0x06, 0x52, 0x42, + 0x9d, 0xf6, 0x27, 0x7b, 0x41, 0x21, 0x4f, 0x12, 0x03, 0x69, 0xd2, 0x07, 0x05, 0x4c, 0xff, 0x83, + 0xb0, 0x42, 0x69, 0xcb, 0x47, 0x40, 0x0c, 0xa4, 0x29, 0x35, 0xe9, 0xcd, 0x84, 0xce, 0x3a, 0xcc, + 0xd6, 0xcd, 0x86, 0x0f, 0x48, 0xf1, 0xfc, 0xa7, 0xc4, 0x5c, 0x4f, 0x3c, 0xa1, 0xca, 0x0c, 0x06, + 0xd2, 0x8c, 0x88, 0x27, 0x10, 0xf9, 0x04, 0x42, 0x62, 0xba, 0x86, 0x0f, 0x98, 0xc5, 0x40, 0x9a, + 0x2d, 0xac, 0x1e, 0xbb, 0x5b, 0x5b, 0xae, 0x41, 0x6c, 0xbd, 0x5a, 0x36, 0x5d, 0x43, 0x4d, 0x7a, + 0xf6, 0x82, 0xec, 0x1d, 0x38, 0xeb, 0xf4, 0xc6, 0x35, 0x87, 0x81, 0x34, 0xa7, 0xce, 0x38, 0x3d, + 0x81, 0x85, 0xb0, 0x30, 0x47, 0x69, 0x0c, 0xa4, 0x74, 0x00, 0x8b, 0xec, 0x86, 0x13, 0xf5, 0x7e, + 0x1e, 0x03, 0x69, 0x5e, 0x4d, 0x39, 0x11, 0xef, 0x7d, 0x48, 0xc8, 0x83, 0x30, 0x90, 0x90, 0x80, + 0x04, 0x2c, 0x05, 0x78, 0xda, 0x26, 0x16, 0xd1, 0x18, 0xa9, 0xed, 0xf4, 0xe4, 0x6b, 0x01, 0xc7, + 0xa5, 0xa4, 0xba, 0x10, 0x2c, 0x3e, 0x88, 0xe4, 0xed, 0x2a, 0x4c, 0x51, 0x93, 0x78, 0x4d, 0xc6, + 0xeb, 0x01, 0x99, 0x53, 0xbc, 0x5e, 0x16, 0x65, 0x51, 0xab, 0x72, 0x50, 0xab, 0x72, 0xd9, 0x5b, + 0xbd, 0x39, 0xa6, 0x42, 0x0e, 0xe6, 0x23, 0x74, 0x01, 0x4e, 0x0b, 0x53, 0xa1, 0x95, 0x39, 0xed, + 0xed, 0xca, 0xcd, 0x31, 0x55, 0x10, 0x0a, 0x11, 0xf4, 0x14, 0x26, 0x0d, 0xcd, 0xf2, 0xfd, 0x58, + 0xe4, 0x15, 0x72, 0x6d, 0xf4, 0x0a, 0xb9, 0xa3, 0x59, 0xdc, 0xdd, 0xb2, 0xc9, 0xec, 0xb6, 0x3a, + 0x65, 0xf8, 0x43, 0xb4, 0x0f, 0x17, 0x0c, 0xcd, 0xb2, 0xfa, 0xe3, 0x3d, 0xc3, 0x75, 0x6e, 0x9e, + 0x48, 0xc7, 0xea, 0xc9, 0x8f, 0x10, 0x9c, 0x37, 0xfa, 0xe7, 0x23, 0xca, 0xa2, 0x6a, 0x7d, 0xe5, + 0xcc, 0x9b, 0x29, 0x8b, 0x4e, 0x30, 0xa8, 0x1c, 0x99, 0x47, 0x45, 0x98, 0x31, 0xa9, 0xb9, 0x41, + 0xcd, 0x3d, 0x62, 0x7a, 0x0d, 0x51, 0x6b, 0x6d, 0x69, 0x86, 0x28, 0xfb, 0x4c, 0x96, 0x17, 0xc6, + 0xd0, 0x75, 0xb4, 0x01, 0xe7, 0xc2, 0xae, 0xeb, 0x7b, 0x7c, 0x8e, 0xef, 0x78, 0x76, 0x60, 0xc7, + 0x1f, 0x06, 0x38, 0x75, 0x36, 0x34, 0x11, 0x24, 0x4f, 0x61, 0x78, 0x92, 0x76, 0x22, 0x05, 0xb5, + 0x84, 0xe3, 0x23, 0x17, 0xd4, 0x7c, 0x40, 0x54, 0x0e, 0x0a, 0x2b, 0xfb, 0x13, 0x80, 0x89, 0xc3, + 0x76, 0x6b, 0x6a, 0x06, 0x09, 0xda, 0xad, 0xf7, 0x8d, 0x16, 0x61, 0x42, 0x33, 0xa8, 0x6b, 0xb2, + 0x4c, 0x8c, 0x57, 0xb8, 0x3f, 0x42, 0xf7, 0x61, 0x8c, 0xee, 0xf2, 0x5e, 0x39, 0x5b, 0x28, 0x9d, + 0xb4, 0x05, 0xcb, 0x9b, 0x84, 0x58, 0xdc, 0xb1, 0x18, 0xdd, 0xcd, 0x2d, 0xc3, 0xa9, 0x60, 0x8c, + 0x92, 0x70, 0xe2, 0x7a, 0xe9, 0xf6, 0x83, 0x72, 0x7a, 0x0c, 0x4d, 0xc1, 0xf1, 0x87, 0xea, 0xa3, + 0x72, 0x1a, 
0x64, 0x75, 0x38, 0xd3, 0x73, 0x30, 0x51, 0x1a, 0xc6, 0x77, 0x49, 0xdb, 0xf7, 0xd7, + 0xfb, 0x44, 0xeb, 0x70, 0x42, 0x64, 0x27, 0x76, 0x82, 0x76, 0x23, 0x4c, 0x8b, 0xb1, 0x2b, 0x20, + 0xbb, 0x09, 0x17, 0x8f, 0x3e, 0x9b, 0x47, 0x68, 0x9e, 0x8a, 0x6a, 0x26, 0xa3, 0x2c, 0x5f, 0x05, + 0x2c, 0xfd, 0xe7, 0xec, 0x08, 0x96, 0xad, 0x28, 0xcb, 0x9b, 0x5c, 0x6b, 0x87, 0xfa, 0xc5, 0xcf, + 0x0e, 0x4a, 0x4f, 0x56, 0x1e, 0xc3, 0x8b, 0xd7, 0x75, 0xb3, 0x86, 0xa9, 0xcb, 0xb0, 0x41, 0x6d, + 0x82, 0xb5, 0x8a, 0xf7, 0x39, 0x70, 0x97, 0xcb, 0x4d, 0xc6, 0x2c, 0xa7, 0xa8, 0x28, 0x0d, 0x9d, + 0x35, 0xdd, 0x8a, 0x5c, 0xa5, 0x86, 0xe2, 0xf9, 0x90, 0x27, 0x55, 0xea, 0xb4, 0x1d, 0x46, 0xfc, + 0xa1, 0xef, 0xd2, 0xfa, 0x4c, 0xd0, 0xc9, 0xb8, 0xde, 0x0a, 0x86, 0xa9, 0x48, 0x2e, 0xbd, 0x5d, + 0xdb, 0x2e, 0xab, 0x77, 0xd3, 0x63, 0x68, 0x12, 0xc6, 0xef, 0x6e, 0x95, 0xd3, 0xa0, 0xf0, 0xe7, + 0x1c, 0x3c, 0xd3, 0xaf, 0xfa, 0x80, 0xd8, 0x7b, 0x7a, 0x95, 0xa0, 0x57, 0x71, 0x98, 0xd8, 0xb0, + 0xbd, 0xa3, 0x89, 0xd6, 0x46, 0x8e, 0x3c, 0x3b, 0xba, 0x49, 0xee, 0xd7, 0xd8, 0xd7, 0x7f, 0xfd, + 0xf3, 0x6d, 0xec, 0xc7, 0x58, 0xee, 0x87, 0x98, 0xb2, 0xb7, 0x16, 0x3c, 0x03, 0x8f, 0x7a, 0x04, + 0x2a, 0x9d, 0xc8, 0xf3, 0xa0, 0xab, 0x74, 0xa2, 0x6f, 0x81, 0xae, 0xd2, 0x89, 0x5c, 0x12, 0x5d, + 0xc5, 0x21, 0x96, 0x66, 0x6b, 0x8c, 0xda, 0x4a, 0xc7, 0xed, 0x59, 0xe8, 0x44, 0xae, 0x9b, 0xae, + 0xd2, 0xe9, 0xb9, 0xa3, 0x82, 0x71, 0x64, 0xfd, 0xf0, 0x76, 0xee, 0x2a, 0x9d, 0x68, 0xaf, 0xfd, + 0xd0, 0x61, 0xb6, 0x65, 0x93, 0xba, 0xbe, 0xaf, 0xac, 0x74, 0x85, 0x48, 0xc4, 0xcc, 0xe9, 0xe7, + 0x71, 0xfa, 0x85, 0x9c, 0x3e, 0x83, 0x5e, 0x27, 0x87, 0x35, 0xb2, 0x2e, 0x7a, 0x05, 0x20, 0x14, + 0x1b, 0xb4, 0x4e, 0x6b, 0xed, 0xb7, 0xb4, 0x49, 0x2b, 0x7c, 0x8f, 0x2e, 0xe6, 0x96, 0x8f, 0xd9, + 0xa1, 0x22, 0x58, 0x41, 0x5f, 0xc2, 0xc4, 0x6d, 0x4a, 0x77, 0x5d, 0x0b, 0xcd, 0xc9, 0xde, 0x6b, + 0x58, 0xbe, 0x55, 0xbb, 0x23, 0xde, 0xc3, 0x27, 0x51, 0x96, 0xb9, 0xb2, 0x84, 0xde, 0x3d, 0xf6, + 0x6c, 0x78, 0x8f, 0xd2, 0x2e, 0xfa, 0x06, 0xc0, 0xc4, 0x23, 0xab, 0x76, 0xc2, 0xf3, 0x3b, 0xe4, + 0xfe, 0xcf, 0xad, 0x71, 0x2f, 0xde, 0xcb, 0xbe, 0xa6, 0x17, 0x5e, 0x1a, 0x34, 0x98, 0xd8, 0x24, + 0x2d, 0xc2, 0xc8, 0x60, 0x1a, 0x86, 0xa9, 0xf8, 0xb1, 0xae, 0xbc, 0x6e, 0xac, 0x7f, 0x03, 0x38, + 0x75, 0x83, 0xb0, 0xfb, 0x2e, 0xb1, 0xdb, 0xff, 0x65, 0xb4, 0x2f, 0xc1, 0x41, 0x49, 0xcd, 0x6d, + 0xc1, 0xa5, 0xa3, 0xda, 0x55, 0x28, 0x38, 0x62, 0x9b, 0x7a, 0x02, 0x78, 0x74, 0x32, 0x5a, 0x3d, + 0x2e, 0xba, 0x2f, 0x3c, 0xfa, 0x20, 0xc6, 0x97, 0x31, 0x38, 0x5e, 0xae, 0x36, 0x29, 0x92, 0x86, + 0xc4, 0xe7, 0xb8, 0x15, 0x59, 0xdc, 0x0d, 0x41, 0x7a, 0x5f, 0x1b, 0x99, 0xfb, 0x05, 0x1c, 0x94, + 0xae, 0xe7, 0x36, 0x21, 0xea, 0x0d, 0x94, 0xeb, 0x8d, 0x18, 0x1e, 0x0f, 0xee, 0xd9, 0xf1, 0xc1, + 0x91, 0x6a, 0x93, 0x2a, 0x1d, 0x51, 0xe6, 0xdb, 0x67, 0x73, 0x69, 0x65, 0xaf, 0x10, 0xe2, 0xbd, + 0xb5, 0xa2, 0xb8, 0x35, 0xb6, 0x11, 0x1a, 0x58, 0x42, 0xbf, 0x01, 0x38, 0xed, 0x5d, 0xcc, 0xf7, + 0x34, 0xd6, 0xe4, 0x3e, 0xbe, 0x9d, 0xe2, 0xbf, 0xc6, 0x63, 0xbb, 0x9a, 0xbb, 0x7c, 0xec, 0xb1, + 0xec, 0xf9, 0x0b, 0x2a, 0x7b, 0xcf, 0x16, 0x5e, 0x0a, 0x25, 0x08, 0xb7, 0xe8, 0xba, 0x6e, 0xd6, + 0x74, 0xb3, 0xe1, 0xa0, 0xb3, 0x03, 0xa7, 0x6e, 0xd3, 0xff, 0xa3, 0x3d, 0xf4, 0x40, 0x8e, 0xa1, + 0xc7, 0x70, 0xd2, 0x7b, 0x97, 0x51, 0x97, 0xa1, 0x21, 0xa0, 0xa1, 0xc6, 0xe7, 0xb8, 0xfb, 0xa7, + 0xd1, 0x42, 0x34, 0x9f, 0x4c, 0x90, 0x65, 0xff, 0x00, 0x07, 0xa5, 0xdf, 0x01, 0xaa, 0x0f, 0xb9, + 0x0e, 0x71, 0x8d, 0x38, 0x55, 0x5b, 0xe7, 0xff, 0xc5, 0x71, 0x3e, 0x8f, 0x5f, 0x34, 0xf5, 0x6a, + 0x13, 0x3b, 0x4d, 0xea, 0xb6, 0x6a, 
0xd8, 0xa4, 0x0c, 0x57, 0x08, 0x76, 0x1d, 0x52, 0xc3, 0xba, + 0x89, 0xad, 0x96, 0x56, 0x25, 0x98, 0xd6, 0x31, 0x6b, 0x12, 0x5c, 0xa3, 0x55, 0xd7, 0x20, 0xa6, + 0xf8, 0xe7, 0x8e, 0xab, 0xd4, 0xf0, 0x06, 0xe7, 0xb3, 0xf7, 0xe1, 0xf2, 0x51, 0x35, 0xe5, 0x6d, + 0x66, 0x70, 0x01, 0x8f, 0x78, 0xee, 0x0a, 0x9f, 0xc3, 0xa5, 0x92, 0x49, 0x59, 0x93, 0xd8, 0x3e, + 0xc3, 0xa7, 0x3a, 0x6b, 0x46, 0xd2, 0xfd, 0x51, 0x4f, 0xf2, 0x47, 0x4d, 0xde, 0xd8, 0xfa, 0xcf, + 0xb1, 0x83, 0xd2, 0x77, 0x31, 0xc4, 0xe0, 0x42, 0x09, 0xaf, 0xeb, 0xcc, 0x0b, 0x30, 0x72, 0x36, + 0x9e, 0xc0, 0x53, 0x0d, 0xf5, 0xde, 0x46, 0xfe, 0x86, 0xf0, 0x06, 0x5b, 0x36, 0x7d, 0x4e, 0xaa, + 0x6c, 0xd4, 0x28, 0xb2, 0x69, 0x93, 0x9a, 0xe4, 0x63, 0x7f, 0x77, 0x3c, 0x74, 0x21, 0xbe, 0x26, + 0xbf, 0xbf, 0x12, 0x07, 0xb1, 0xf1, 0x42, 0x5a, 0xb3, 0xac, 0x96, 0x5e, 0xe5, 0xa9, 0x54, 0x9e, + 0x3b, 0xd4, 0x2c, 0x2c, 0x46, 0x67, 0xf6, 0xf3, 0x75, 0x4a, 0xf3, 0x86, 0x6e, 0x90, 0xe2, 0x00, + 0xb2, 0x38, 0x04, 0x69, 0xdf, 0x82, 0x67, 0xee, 0x1c, 0xe6, 0x3f, 0x1a, 0xc2, 0xa8, 0xae, 0x6f, + 0x27, 0xc3, 0xd2, 0xa9, 0x24, 0x78, 0xf6, 0x2e, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x88, 0x7d, + 0xbe, 0x6f, 0xd5, 0x12, 0x00, 0x00, } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.gw.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.gw.go index 213f5415fcd4..c62dbd038f6a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.gw.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: examples/examplepb/a_bit_of_everything.proto -// DO NOT EDIT! /* Package examplepb is a reverse proxy. 
@@ -23,10 +22,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -47,171 +48,171 @@ func request_ABitOfEverythingService_Create_0(ctx context.Context, marshaler run val, ok = pathParams["float_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "float_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "float_value") } protoReq.FloatValue, err = runtime.Float32(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "float_value", err) } val, ok = pathParams["double_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "double_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "double_value") } protoReq.DoubleValue, err = runtime.Float64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "double_value", err) } val, ok = pathParams["int64_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "int64_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "int64_value") } protoReq.Int64Value, err = runtime.Int64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "int64_value", err) } val, ok = pathParams["uint64_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "uint64_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uint64_value") } protoReq.Uint64Value, err = runtime.Uint64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uint64_value", err) } val, ok = pathParams["int32_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "int32_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "int32_value") } protoReq.Int32Value, err = runtime.Int32(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "int32_value", err) } val, ok = pathParams["fixed64_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "fixed64_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "fixed64_value") } protoReq.Fixed64Value, err = runtime.Uint64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "fixed64_value", err) } val, ok = pathParams["fixed32_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "fixed32_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "fixed32_value") } protoReq.Fixed32Value, err = runtime.Uint32(val) if err != nil { - return nil, metadata, err + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "fixed32_value", err) } val, ok = pathParams["bool_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "bool_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "bool_value") } protoReq.BoolValue, err = runtime.Bool(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "bool_value", err) } val, ok = pathParams["string_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "string_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "string_value") } protoReq.StringValue, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "string_value", err) } val, ok = pathParams["uint32_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "uint32_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uint32_value") } protoReq.Uint32Value, err = runtime.Uint32(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uint32_value", err) } val, ok = pathParams["sfixed32_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "sfixed32_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sfixed32_value") } protoReq.Sfixed32Value, err = runtime.Int32(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sfixed32_value", err) } val, ok = pathParams["sfixed64_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "sfixed64_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sfixed64_value") } protoReq.Sfixed64Value, err = runtime.Int64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sfixed64_value", err) } val, ok = pathParams["sint32_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "sint32_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sint32_value") } protoReq.Sint32Value, err = runtime.Int32(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sint32_value", err) } val, ok = pathParams["sint64_value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "sint64_value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sint64_value") } protoReq.Sint64Value, err = runtime.Int64(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sint64_value", err) } val, ok = pathParams["nonConventionalNameValue"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "nonConventionalNameValue") + return 
nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "nonConventionalNameValue") } protoReq.NonConventionalNameValue, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "nonConventionalNameValue", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ABitOfEverythingService_Create_0); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Create(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -224,7 +225,7 @@ func request_ABitOfEverythingService_CreateBody_0(ctx context.Context, marshaler var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.CreateBody(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -245,13 +246,13 @@ func request_ABitOfEverythingService_Lookup_0(ctx context.Context, marshaler run val, ok = pathParams["uuid"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") } protoReq.Uuid, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uuid", err) } msg, err := client.Lookup(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -264,7 +265,7 @@ func request_ABitOfEverythingService_Update_0(ctx context.Context, marshaler run var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -276,13 +277,13 @@ func request_ABitOfEverythingService_Update_0(ctx context.Context, marshaler run val, ok = pathParams["uuid"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") } protoReq.Uuid, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uuid", err) } msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -303,13 +304,13 @@ func request_ABitOfEverythingService_Delete_0(ctx context.Context, marshaler run val, ok = pathParams["uuid"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") } protoReq.Uuid, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uuid", err) } msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -317,6 +318,41 @@ func request_ABitOfEverythingService_Delete_0(ctx 
context.Context, marshaler run } +var ( + filter_ABitOfEverythingService_GetQuery_0 = &utilities.DoubleArray{Encoding: map[string]int{"uuid": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_ABitOfEverythingService_GetQuery_0(ctx context.Context, marshaler runtime.Marshaler, client ABitOfEverythingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ABitOfEverything + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["uuid"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "uuid") + } + + protoReq.Uuid, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "uuid", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ABitOfEverythingService_GetQuery_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetQuery(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + func request_ABitOfEverythingService_Echo_0(ctx context.Context, marshaler runtime.Marshaler, client ABitOfEverythingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq sub.StringMessage var metadata runtime.ServerMetadata @@ -330,13 +366,13 @@ func request_ABitOfEverythingService_Echo_0(ctx context.Context, marshaler runti val, ok = pathParams["value"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "value") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "value") } protoReq.Value, err = runtime.StringP(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "value", err) } msg, err := client.Echo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -349,7 +385,7 @@ func request_ABitOfEverythingService_Echo_1(ctx context.Context, marshaler runti var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.Value); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Echo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -366,7 +402,7 @@ func request_ABitOfEverythingService_Echo_2(ctx context.Context, marshaler runti var metadata runtime.ServerMetadata if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ABitOfEverythingService_Echo_2); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.Echo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -379,7 +415,7 @@ func request_ABitOfEverythingService_DeepPathEcho_0(ctx context.Context, marshal var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -391,13 +427,13 @@ func 
request_ABitOfEverythingService_DeepPathEcho_0(ctx context.Context, marshal val, ok = pathParams["single_nested.name"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "single_nested.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "single_nested.name") } err = runtime.PopulateFieldFromPath(&protoReq, "single_nested.name", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "single_nested.name", err) } msg, err := client.DeepPathEcho(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -442,7 +478,15 @@ func RegisterABitOfEverythingServiceHandlerFromEndpoint(ctx context.Context, mux // RegisterABitOfEverythingServiceHandler registers the http handlers for service ABitOfEverythingService to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewABitOfEverythingServiceClient(conn) + return RegisterABitOfEverythingServiceHandlerClient(ctx, mux, NewABitOfEverythingServiceClient(conn)) +} + +// RegisterABitOfEverythingServiceHandler registers the http handlers for service ABitOfEverythingService to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ABitOfEverythingServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ABitOfEverythingServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ABitOfEverythingServiceClient" to call the correct interceptors. +func RegisterABitOfEverythingServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ABitOfEverythingServiceClient) error { mux.Handle("POST", pattern_ABitOfEverythingService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -457,18 +501,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Create_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Create_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -485,18 +530,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_CreateBody_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_CreateBody_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_CreateBody_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -513,18 +559,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Lookup_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Lookup_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Lookup_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -541,18 +588,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Update_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Update_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Update_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -569,18 +617,48 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Delete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Delete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_ABitOfEverythingService_GetQuery_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ABitOfEverythingService_GetQuery_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ABitOfEverythingService_GetQuery_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -597,18 +675,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Echo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Echo_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Echo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -625,18 +704,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Echo_1(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Echo_1(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Echo_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -653,18 +733,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Echo_2(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Echo_2(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Echo_2(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -681,18 +762,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_DeepPathEcho_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_DeepPathEcho_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_DeepPathEcho_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -709,18 +791,19 @@ func RegisterABitOfEverythingServiceHandler(ctx context.Context, mux *runtime.Se }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_ABitOfEverythingService_Timeout_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_ABitOfEverythingService_Timeout_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_ABitOfEverythingService_Timeout_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -738,6 +821,8 @@ var ( pattern_ABitOfEverythingService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "example", "a_bit_of_everything", "uuid"}, "")) + pattern_ABitOfEverythingService_GetQuery_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "example", "a_bit_of_everything", "query", "uuid"}, "")) + pattern_ABitOfEverythingService_Echo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "example", "a_bit_of_everything", "echo", "value"}, "")) pattern_ABitOfEverythingService_Echo_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "example", "echo"}, "")) @@ -760,6 +845,8 @@ var ( forward_ABitOfEverythingService_Delete_0 = runtime.ForwardResponseMessage + forward_ABitOfEverythingService_GetQuery_0 = runtime.ForwardResponseMessage + forward_ABitOfEverythingService_Echo_0 = runtime.ForwardResponseMessage forward_ABitOfEverythingService_Echo_1 = runtime.ForwardResponseMessage diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.proto index 85b44edfe421..6250bdd43813 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.proto @@ -4,13 +4,47 @@ package grpc.gateway.examples.examplepb; import "google/api/annotations.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/duration.proto"; import "examples/sub/message.proto"; import "examples/sub2/message.proto"; import "google/protobuf/timestamp.proto"; +import "protoc-gen-swagger/options/annotations.proto"; + +option (grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger) = { + info: { + title: "A Bit of Everything"; + version: "1.0"; + contact: { + name: "gRPC-Gateway project"; + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + email: "none@example.com"; + }; + }; + // Overwriting host entry breaks tests, so this is not done here. 
+ external_docs: { + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + description: "More about gRPC-Gateway"; + } + schemes: HTTP; + schemes: HTTPS; + schemes: WSS; + consumes: "application/json"; + consumes: "application/x-foo-mime"; + produces: "application/json"; + produces: "application/x-foo-mime"; +}; + // Intentionaly complicated message type to cover much features of Protobuf. // NEXT ID: 27 message ABitOfEverything { + option (grpc.gateway.protoc_gen_swagger.options.openapiv2_schema) = { + external_docs: { + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + description: "Find out more about ABitOfEverything"; + } + }; + // Nested is nested type. message Nested { // name is nested field. @@ -58,6 +92,9 @@ message ABitOfEverything { string nonConventionalNameValue = 26; google.protobuf.Timestamp timestamp_value = 27; + + // repeated enum value. it is comma-separated in query + repeated NumericEnum repeated_enum_value = 28; } // NumericEnum is one or zero. @@ -68,7 +105,18 @@ enum NumericEnum { ONE = 1; } +// ABitOfEverything service is used to validate that APIs with complicated +// proto messages and URL templates are still processed correctly. service ABitOfEverythingService { + + option (grpc.gateway.protoc_gen_swagger.options.openapiv2_tag) = { + description: "ABitOfEverythingService description -- which should not be used in place of the documentation comment!" + external_docs: { + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + description: "Find out more about EchoService"; + } + }; + rpc Create(ABitOfEverything) returns (ABitOfEverything) { // TODO add enum_value option (google.api.http) = { @@ -97,6 +145,25 @@ service ABitOfEverythingService { delete: "/v1/example/a_bit_of_everything/{uuid}" }; } + rpc GetQuery(ABitOfEverything) returns (google.protobuf.Empty) { + option (google.api.http) = { + get: "/v1/example/a_bit_of_everything/query/{uuid}" + }; + option (grpc.gateway.protoc_gen_swagger.options.openapiv2_operation) = { + deprecated: true // For testing purposes. + external_docs: { + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + description: "Find out more about GetQuery"; + } + }; + } + // Echo allows posting a StringMessage value. + // + // It also exposes multiple bindings. + // + // This makes it useful when validating that the OpenAPI v2 API + // description exposes documentation correctly on all paths + // defined as additional_bindings in the proto. 
rpc Echo(grpc.gateway.examples.sub.StringMessage) returns (grpc.gateway.examples.sub.StringMessage) { option (google.api.http) = { get: "/v1/example/a_bit_of_everything/echo/{value}" @@ -108,6 +175,12 @@ service ABitOfEverythingService { get: "/v2/example/echo" } }; + option (grpc.gateway.protoc_gen_swagger.options.openapiv2_operation) = { + external_docs: { + url: "https://github.com/grpc-ecosystem/grpc-gateway"; + description: "Find out more Echo"; + } + }; } rpc DeepPathEcho(ABitOfEverything) returns (ABitOfEverything) { option (google.api.http) = { @@ -115,9 +188,14 @@ service ABitOfEverythingService { body: "*" }; } + rpc NoBindings(google.protobuf.Duration) returns (google.protobuf.Empty) {} rpc Timeout(google.protobuf.Empty) returns (google.protobuf.Empty) { option (google.api.http) = { get: "/v2/example/timeout", }; } } + +service AnotherServiceWithNoBindings { + rpc NoBindings(google.protobuf.Empty) returns (google.protobuf.Empty) {} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.swagger.json index e8ad64f11d25..c42cd34ed13a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/a_bit_of_everything.swagger.json @@ -1,18 +1,26 @@ { "swagger": "2.0", "info": { - "title": "examples/examplepb/a_bit_of_everything.proto", - "version": "version not set" + "title": "A Bit of Everything", + "version": "1.0", + "contact": { + "name": "gRPC-Gateway project", + "url": "https://github.com/grpc-ecosystem/grpc-gateway", + "email": "none@example.com" + } }, "schemes": [ "http", - "https" + "https", + "wss" ], "consumes": [ - "application/json" + "application/json", + "application/x-foo-mime" ], "produces": [ - "application/json" + "application/json", + "application/x-foo-mime" ], "paths": { "/v1/example/a_bit_of_everything": { @@ -43,6 +51,8 @@ }, "/v1/example/a_bit_of_everything/echo/{value}": { "get": { + "summary": "Echo allows posting a StringMessage value.", + "description": "It also exposes multiple bindings.\n\nThis makes it useful when validating that the OpenAPI v2 API\ndescription exposes documentation correctly on all paths\ndefined as additional_bindings in the proto.", "operationId": "Echo", "responses": { "200": { @@ -57,13 +67,223 @@ "name": "value", "in": "path", "required": true, + "type": "string" + } + ], + "tags": [ + "ABitOfEverythingService" + ], + "externalDocs": { + "description": "Find out more Echo", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" + } + } + }, + "/v1/example/a_bit_of_everything/query/{uuid}": { + "get": { + "operationId": "GetQuery", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/protobufEmpty" + } + } + }, + "parameters": [ + { + "name": "uuid", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "single_nested.name", + "description": "name is nested field.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "single_nested.amount", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + }, + { + "name": "single_nested.ok", + "description": " - FALSE: FALSE is false.\n - TRUE: TRUE is true.", + "in": "query", + "required": false, + "type": "string", + "enum": [ + "FALSE", + "TRUE" + ], + "default": "FALSE" + }, + { + "name": "float_value", + "in": "query", 
+ "required": false, + "type": "number", + "format": "float" + }, + { + "name": "double_value", + "in": "query", + "required": false, + "type": "number", + "format": "double" + }, + { + "name": "int64_value", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "uint64_value", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "int32_value", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "fixed64_value", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "fixed32_value", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + }, + { + "name": "bool_value", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "string_value", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "uint32_value", + "description": "TODO(yugui) add bytes_value.", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + }, + { + "name": "enum_value", + "description": " - ZERO: ZERO means 0\n - ONE: ONE means 1", + "in": "query", + "required": false, + "type": "string", + "enum": [ + "ZERO", + "ONE" + ], + "default": "ZERO" + }, + { + "name": "sfixed32_value", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "sfixed64_value", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "sint32_value", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "sint64_value", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "repeated_string_value", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string" + } + }, + { + "name": "oneof_string", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "nonConventionalNameValue", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "timestamp_value", + "in": "query", + "required": false, "type": "string", - "format": "string" + "format": "date-time" + }, + { + "name": "repeated_enum_value", + "description": "repeated enum value. 
it is comma-separated in query.\n\n - ZERO: ZERO means 0\n - ONE: ONE means 1", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "ZERO", + "ONE" + ] + } } ], "tags": [ "ABitOfEverythingService" - ] + ], + "deprecated": true, + "externalDocs": { + "description": "Find out more about GetQuery", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" + } } }, "/v1/example/a_bit_of_everything/{float_value}/{double_value}/{int64_value}/separator/{uint64_value}/{int32_value}/{fixed64_value}/{fixed32_value}/{bool_value}/{string_value}/{uint32_value}/{sfixed32_value}/{sfixed64_value}/{sint32_value}/{sint64_value}/{nonConventionalNameValue}": { @@ -138,8 +358,7 @@ "name": "string_value", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" }, { "name": "uint32_value", @@ -180,8 +399,7 @@ "name": "nonConventionalNameValue", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" } ], "tags": [ @@ -205,8 +423,7 @@ "name": "single_nested.name", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" }, { "name": "body", @@ -238,8 +455,7 @@ "name": "uuid", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" } ], "tags": [ @@ -261,8 +477,7 @@ "name": "uuid", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" } ], "tags": [ @@ -284,8 +499,7 @@ "name": "uuid", "in": "path", "required": true, - "type": "string", - "format": "string" + "type": "string" }, { "name": "body", @@ -303,7 +517,9 @@ }, "/v2/example/echo": { "get": { - "operationId": "Echo", + "summary": "Echo allows posting a StringMessage value.", + "description": "It also exposes multiple bindings.\n\nThis makes it useful when validating that the OpenAPI v2 API\ndescription exposes documentation correctly on all paths\ndefined as additional_bindings in the proto.", + "operationId": "Echo3", "responses": { "200": { "description": "", @@ -312,12 +528,26 @@ } } }, + "parameters": [ + { + "name": "value", + "in": "query", + "required": false, + "type": "string" + } + ], "tags": [ "ABitOfEverythingService" - ] + ], + "externalDocs": { + "description": "Find out more Echo", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" + } }, "post": { - "operationId": "Echo", + "summary": "Echo allows posting a StringMessage value.", + "description": "It also exposes multiple bindings.\n\nThis makes it useful when validating that the OpenAPI v2 API\ndescription exposes documentation correctly on all paths\ndefined as additional_bindings in the proto.", + "operationId": "Echo2", "responses": { "200": { "description": "", @@ -332,14 +562,17 @@ "in": "body", "required": true, "schema": { - "type": "string", - "format": "string" + "type": "string" } } ], "tags": [ "ABitOfEverythingService" - ] + ], + "externalDocs": { + "description": "Find out more Echo", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" + } } }, "/v2/example/timeout": { @@ -363,15 +596,14 @@ "ABitOfEverythingNested": { "type": "object", "properties": { - "amount": { - "type": "integer", - "format": "int64" - }, "name": { "type": "string", - "format": "string", "description": "name is nested field." 
}, + "amount": { + "type": "integer", + "format": "int64" + }, "ok": { "$ref": "#/definitions/NestedDeepEnum" } @@ -390,79 +622,60 @@ "examplepbABitOfEverything": { "type": "object", "properties": { - "bool_value": { - "type": "boolean", - "format": "boolean" + "single_nested": { + "$ref": "#/definitions/ABitOfEverythingNested" + }, + "uuid": { + "type": "string" + }, + "nested": { + "type": "array", + "items": { + "$ref": "#/definitions/ABitOfEverythingNested" + } + }, + "float_value": { + "type": "number", + "format": "float" }, "double_value": { "type": "number", "format": "double" }, - "enum_value": { - "$ref": "#/definitions/examplepbNumericEnum" - }, - "fixed32_value": { - "type": "integer", + "int64_value": { + "type": "string", "format": "int64" }, - "fixed64_value": { + "uint64_value": { "type": "string", "format": "uint64" }, - "float_value": { - "type": "number", - "format": "float" - }, "int32_value": { "type": "integer", "format": "int32" }, - "int64_value": { + "fixed64_value": { "type": "string", - "format": "int64" - }, - "map_value": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/examplepbNumericEnum" - } - }, - "mapped_nested_value": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/ABitOfEverythingNested" - } - }, - "mapped_string_value": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "string" - } + "format": "uint64" }, - "nested": { - "type": "array", - "items": { - "$ref": "#/definitions/ABitOfEverythingNested" - } + "fixed32_value": { + "type": "integer", + "format": "int64" }, - "nonConventionalNameValue": { - "type": "string", - "format": "string" + "bool_value": { + "type": "boolean", + "format": "boolean" }, - "oneof_empty": { - "$ref": "#/definitions/protobufEmpty" + "string_value": { + "type": "string" }, - "oneof_string": { - "type": "string", - "format": "string" + "uint32_value": { + "type": "integer", + "format": "int64", + "title": "TODO(yugui) add bytes_value" }, - "repeated_string_value": { - "type": "array", - "items": { - "type": "string", - "format": "string" - } + "enum_value": { + "$ref": "#/definitions/examplepbNumericEnum" }, "sfixed32_value": { "type": "integer", @@ -472,9 +685,6 @@ "type": "string", "format": "int64" }, - "single_nested": { - "$ref": "#/definitions/ABitOfEverythingNested" - }, "sint32_value": { "type": "integer", "format": "int32" @@ -483,29 +693,56 @@ "type": "string", "format": "int64" }, - "string_value": { - "type": "string", - "format": "string" + "repeated_string_value": { + "type": "array", + "items": { + "type": "string" + } }, - "timestamp_value": { - "type": "string", - "format": "date-time" + "oneof_empty": { + "$ref": "#/definitions/protobufEmpty" }, - "uint32_value": { - "type": "integer", - "format": "int64", - "title": "TODO(yugui) add bytes_value" + "oneof_string": { + "type": "string" }, - "uint64_value": { - "type": "string", - "format": "uint64" + "map_value": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/examplepbNumericEnum" + } }, - "uuid": { + "mapped_string_value": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mapped_nested_value": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/ABitOfEverythingNested" + } + }, + "nonConventionalNameValue": { + "type": "string" + }, + "timestamp_value": { "type": "string", - "format": "string" + "format": "date-time" + }, + "repeated_enum_value": { + "type": "array", + "items": { + "$ref": 
"#/definitions/examplepbNumericEnum" + }, + "title": "repeated enum value. it is comma-separated in query" } }, - "title": "Intentionaly complicated message type to cover much features of Protobuf.\nNEXT ID: 27" + "title": "Intentionaly complicated message type to cover much features of Protobuf.\nNEXT ID: 27", + "externalDocs": { + "description": "Find out more about ABitOfEverything", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" + } }, "examplepbNumericEnum": { "type": "string", @@ -521,23 +758,17 @@ "description": "service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "title": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:" }, - "sub2IdMessage": { - "type": "object", - "properties": { - "uuid": { - "type": "string", - "format": "string" - } - } - }, "subStringMessage": { "type": "object", "properties": { "value": { - "type": "string", - "format": "string" + "type": "string" } } } + }, + "externalDocs": { + "description": "More about gRPC-Gateway", + "url": "https://github.com/grpc-ecosystem/grpc-gateway" } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.go index 756fe4c8d983..f4b5f3b5caa6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/examplepb/echo_service.proto -// DO NOT EDIT! /* Package examplepb is a generated protocol buffer package. @@ -30,7 +29,7 @@ package examplepb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" +import _ "google.golang.org/genproto/googleapis/api/annotations" import ( context "golang.org/x/net/context" @@ -51,7 +50,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // SimpleMessage represents a simple message sent to the Echo service. type SimpleMessage struct { // Id represents the message identifier. 
- Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Num int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` } func (m *SimpleMessage) Reset() { *m = SimpleMessage{} } @@ -59,6 +59,20 @@ func (m *SimpleMessage) String() string { return proto.CompactTextStr func (*SimpleMessage) ProtoMessage() {} func (*SimpleMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *SimpleMessage) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *SimpleMessage) GetNum() int64 { + if m != nil { + return m.Num + } + return 0 +} + func init() { proto.RegisterType((*SimpleMessage)(nil), "grpc.gateway.examples.examplepb.SimpleMessage") } @@ -181,20 +195,22 @@ var _EchoService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("examples/examplepb/echo_service.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xad, 0x48, 0xcc, + // 257 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0xd6, 0x87, 0x32, 0x0a, 0x92, 0xf4, 0x53, 0x93, 0x33, 0xf2, 0xe3, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xe4, 0xd3, 0x8b, 0x0a, 0x92, 0xf5, 0xd2, 0x13, 0x4b, 0x52, 0xcb, 0x13, 0x2b, 0xf5, 0x60, 0x7a, 0xf4, 0xe0, 0x7a, 0xa4, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, - 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xda, 0x95, 0xe4, 0xb9, 0x78, 0x83, 0x33, + 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xda, 0x95, 0x0c, 0xb9, 0x78, 0x83, 0x33, 0x41, 0x2a, 0x7d, 0x53, 0x8b, 0x8b, 0x13, 0xd3, 0x53, 0x85, 0xf8, 0xb8, 0x98, 0x32, 0x53, 0x24, - 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x32, 0x53, 0x8c, 0x96, 0x30, 0x71, 0x71, 0xbb, 0x26, - 0x67, 0xe4, 0x07, 0x43, 0x6c, 0x15, 0x6a, 0x65, 0xe4, 0x62, 0x01, 0xf1, 0x85, 0xf4, 0xf4, 0x08, - 0xd8, 0xac, 0x87, 0x62, 0xb0, 0x14, 0x89, 0xea, 0x95, 0x64, 0x9b, 0x2e, 0x3f, 0x99, 0xcc, 0x24, - 0xae, 0x24, 0xaa, 0x5f, 0x66, 0x08, 0x0b, 0x02, 0x70, 0x00, 0xe8, 0x57, 0x67, 0xa6, 0xd4, 0x0a, - 0xf5, 0x30, 0x72, 0x71, 0x80, 0xdc, 0xe1, 0x94, 0x9f, 0x52, 0x49, 0x73, 0xb7, 0x28, 0x80, 0xdd, - 0x22, 0x85, 0xe9, 0x96, 0xf8, 0xa4, 0xfc, 0x94, 0x4a, 0x2b, 0x46, 0x2d, 0x27, 0xee, 0x28, 0x4e, - 0xb8, 0xe6, 0x24, 0x36, 0x70, 0xd8, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x26, 0x96, 0x37, - 0xac, 0xc3, 0x01, 0x00, 0x00, + 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x32, 0x53, 0x84, 0x04, 0xb8, 0x98, 0xf3, 0x4a, 0x73, + 0x25, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0x40, 0x4c, 0xa3, 0xc3, 0x4c, 0x5c, 0xdc, 0xae, 0xc9, + 0x19, 0xf9, 0xc1, 0x10, 0x77, 0x08, 0x2d, 0x61, 0xe4, 0x62, 0x01, 0xf1, 0x85, 0xf4, 0xf4, 0x08, + 0xb8, 0x45, 0x0f, 0xc5, 0x2a, 0x29, 0x12, 0xd5, 0x2b, 0xd9, 0x34, 0x5d, 0x7e, 0x32, 0x99, 0xc9, + 0x4c, 0x49, 0x54, 0xbf, 0xcc, 0x10, 0x16, 0x28, 0xe0, 0x20, 0xd1, 0xaf, 0xce, 0x4c, 0xa9, 0x8d, + 0x92, 0x15, 0x92, 0xc6, 0x2a, 0xa1, 0x5f, 0x9d, 0x57, 0x9a, 0x5b, 0x2b, 0xd4, 0xc3, 0xc8, 0xc5, + 0x01, 0x72, 0xa6, 0x53, 0x7e, 0x4a, 0x25, 0xcd, 0x9d, 0xaa, 0x00, 0x76, 0xaa, 0x14, 0xa6, 0x53, + 0xe3, 0x93, 0xf2, 0x53, 0x2a, 0xad, 0x18, 0xb5, 0x9c, 0xb8, 0xa3, 0x38, 0xe1, 0x9a, 0x93, 0xd8, + 0xc0, 0x91, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x6e, 0x70, 0xce, 0xf4, 0x01, 0x00, + 0x00, } diff --git 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.gw.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.gw.go index 19130064d5c8..100b0dd030d4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.gw.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: examples/examplepb/echo_service.proto -// DO NOT EDIT! /* Package examplepb is a reverse proxy. @@ -20,13 +19,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray +var ( + filter_EchoService_Echo_0 = &utilities.DoubleArray{Encoding: map[string]int{"id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + func request_EchoService_Echo_0(ctx context.Context, marshaler runtime.Marshaler, client EchoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq SimpleMessage var metadata runtime.ServerMetadata @@ -40,13 +45,55 @@ func request_EchoService_Echo_0(ctx context.Context, marshaler runtime.Marshaler val, ok = pathParams["id"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") } protoReq.Id, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_EchoService_Echo_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Echo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_EchoService_Echo_1(ctx context.Context, marshaler runtime.Marshaler, client EchoServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq SimpleMessage + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + val, ok = pathParams["num"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "num") + } + + protoReq.Num, err = runtime.Int64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "num", err) } msg, err := client.Echo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -59,7 +106,7 @@ func request_EchoService_EchoBody_0(ctx context.Context, marshaler runtime.Marsh var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.EchoBody(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -95,7 +142,15 @@ func RegisterEchoServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterEchoServiceHandler registers the http handlers for service EchoService to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterEchoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewEchoServiceClient(conn) + return RegisterEchoServiceHandlerClient(ctx, mux, NewEchoServiceClient(conn)) +} + +// RegisterEchoServiceHandler registers the http handlers for service EchoService to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "EchoServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "EchoServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "EchoServiceClient" to call the correct interceptors. +func RegisterEchoServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client EchoServiceClient) error { mux.Handle("POST", pattern_EchoService_Echo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -110,18 +165,48 @@ func RegisterEchoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_EchoService_Echo_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_EchoService_Echo_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_EchoService_Echo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_EchoService_Echo_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_EchoService_Echo_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_EchoService_Echo_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -138,18 +223,19 @@ func RegisterEchoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_EchoService_EchoBody_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_EchoService_EchoBody_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_EchoService_EchoBody_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -159,11 +245,15 @@ func RegisterEchoServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn var ( pattern_EchoService_Echo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "example", "echo", "id"}, "")) + pattern_EchoService_Echo_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"v1", "example", "echo", "id", "num"}, "")) + pattern_EchoService_EchoBody_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "example", "echo_body"}, "")) ) var ( forward_EchoService_Echo_0 = runtime.ForwardResponseMessage + forward_EchoService_Echo_1 = runtime.ForwardResponseMessage + forward_EchoService_EchoBody_0 = runtime.ForwardResponseMessage ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.proto index 44555e8553f4..0a7a6fa7f49d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.proto @@ -13,6 +13,7 @@ import "google/api/annotations.proto"; message SimpleMessage { // Id represents the message identifier. string id = 1; + int64 num = 2; } // Echo service responds to incoming echo requests. @@ -24,6 +25,9 @@ service EchoService { rpc Echo(SimpleMessage) returns (SimpleMessage) { option (google.api.http) = { post: "/v1/example/echo/{id}" + additional_bindings { + get: "/v1/example/echo/{id}/{num}" + } }; } // EchoBody method receives a simple message and returns it. 
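Review aid (not part of the vendored files): the regenerated echo_service gateway above splits registration into RegisterEchoServiceHandler, which still takes a *grpc.ClientConn, and the new RegisterEchoServiceHandlerClient, which takes an EchoServiceClient directly and relies on that client to carry any interceptors, as its generated comment notes. The additional_bindings entry added to echo_service.proto also exposes Echo over GET /v1/example/echo/{id}/{num} next to the existing POST route. Below is a minimal sketch of wiring a gateway through the new entry point; the listen and backend addresses, dial options, and error handling are assumptions made purely for illustration.

package main

import (
	"log"
	"net/http"

	examplepb "github.com/grpc-ecosystem/grpc-gateway/examples/examplepb"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Dial the gRPC backend; the address and WithInsecure are illustrative assumptions.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	mux := runtime.NewServeMux()

	// New in this bump: register against a client value rather than a *grpc.ClientConn.
	// Any client-side interceptors must already be baked into this client, as the
	// generated comment on RegisterEchoServiceHandlerClient points out.
	if err := examplepb.RegisterEchoServiceHandlerClient(ctx, mux, examplepb.NewEchoServiceClient(conn)); err != nil {
		log.Fatalf("register: %v", err)
	}

	// Both the original POST /v1/example/echo/{id} binding and the new
	// GET /v1/example/echo/{id}/{num} additional binding are routed by this mux.
	log.Fatal(http.ListenAndServe(":8080", mux))
}

With such a setup, a plain HTTP request like GET http://localhost:8080/v1/example/echo/foo/42 would be mapped by the new binding onto SimpleMessage{Id: "foo", Num: 42} before the Echo RPC is invoked.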
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.swagger.json b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.swagger.json index f6c6d8b98145..cf6179322129 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.swagger.json +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/echo_service.swagger.json @@ -34,8 +34,40 @@ "name": "id", "in": "path", "required": true, + "type": "string" + } + ], + "tags": [ + "EchoService" + ] + } + }, + "/v1/example/echo/{id}/{num}": { + "get": { + "summary": "Echo method receives a simple message and returns it.", + "description": "The message posted as the id parameter will also be\nreturned.", + "operationId": "Echo2", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/examplepbSimpleMessage" + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "num", + "in": "path", + "required": true, "type": "string", - "format": "string" + "format": "int64" } ], "tags": [ @@ -77,8 +109,11 @@ "properties": { "id": { "type": "string", - "format": "string", "description": "Id represents the message identifier." + }, + "num": { + "type": "string", + "format": "int64" } }, "description": "SimpleMessage represents a simple message sent to the Echo service." diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.go index f6620070863d..421c9a87a20c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.go @@ -1,13 +1,12 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/examplepb/flow_combination.proto -// DO NOT EDIT! 
package examplepb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" +import _ "google.golang.org/genproto/googleapis/api/annotations" import ( context "golang.org/x/net/context" @@ -38,6 +37,27 @@ func (m *NonEmptyProto) String() string { return proto.CompactTextStr func (*NonEmptyProto) ProtoMessage() {} func (*NonEmptyProto) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } +func (m *NonEmptyProto) GetA() string { + if m != nil { + return m.A + } + return "" +} + +func (m *NonEmptyProto) GetB() string { + if m != nil { + return m.B + } + return "" +} + +func (m *NonEmptyProto) GetC() string { + if m != nil { + return m.C + } + return "" +} + type UnaryProto struct { Str string `protobuf:"bytes,1,opt,name=str" json:"str,omitempty"` } @@ -47,6 +67,13 @@ func (m *UnaryProto) String() string { return proto.CompactTextString func (*UnaryProto) ProtoMessage() {} func (*UnaryProto) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } +func (m *UnaryProto) GetStr() string { + if m != nil { + return m.Str + } + return "" +} + type NestedProto struct { A *UnaryProto `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` B string `protobuf:"bytes,2,opt,name=b" json:"b,omitempty"` @@ -65,6 +92,20 @@ func (m *NestedProto) GetA() *UnaryProto { return nil } +func (m *NestedProto) GetB() string { + if m != nil { + return m.B + } + return "" +} + +func (m *NestedProto) GetC() string { + if m != nil { + return m.C + } + return "" +} + type SingleNestedProto struct { A *UnaryProto `protobuf:"bytes,1,opt,name=a" json:"a,omitempty"` } @@ -637,7 +678,7 @@ func init() { proto.RegisterFile("examples/examplepb/flow_combination.proto", fi var fileDescriptor3 = []byte{ // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x96, 0x3f, 0x8f, 0x12, 0x4f, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x3f, 0x8f, 0x12, 0x4f, 0x18, 0xc7, 0xf3, 0x70, 0xc9, 0x2f, 0xb9, 0xe1, 0xfe, 0x70, 0xcb, 0x2f, 0x08, 0x1c, 0x1e, 0x77, 0xe3, 0x25, 0xe2, 0xbf, 0x5d, 0x82, 0xd5, 0x51, 0x9e, 0xd1, 0x92, 0x5c, 0xb8, 0xd8, 0x6c, 0x63, 0x66, 0x87, 0x15, 0x48, 0x60, 0x67, 0x6e, 0x77, 0x0d, 0x5e, 0x08, 0x31, 0xb1, 0xb1, 0xb4, 0xf0, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.gw.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.gw.go index af9da8d5f95f..a7b9e3b2d5c5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.gw.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/flow_combination.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: examples/examplepb/flow_combination.proto -// DO NOT EDIT! /* Package examplepb is a reverse proxy. 
@@ -20,10 +19,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -69,7 +70,7 @@ func request_FlowCombination_StreamEmptyRpc_0(ctx context.Context, marshaler run } if err != nil { grpclog.Printf("Failed to decode request: %v", err) - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err = stream.Send(&protoReq); err != nil { grpclog.Printf("Failed to send request: %v", err) @@ -151,7 +152,7 @@ func request_FlowCombination_RpcBodyRpc_0(ctx context.Context, marshaler runtime var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -172,35 +173,35 @@ func request_FlowCombination_RpcBodyRpc_1(ctx context.Context, marshaler runtime val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } val, ok = pathParams["c"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "c") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "c") } protoReq.C, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "c", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -217,7 +218,7 @@ func request_FlowCombination_RpcBodyRpc_2(ctx context.Context, marshaler runtime var metadata runtime.ServerMetadata if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyRpc_2); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -230,7 +231,7 @@ func request_FlowCombination_RpcBodyRpc_3(ctx context.Context, marshaler runtime var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -242,24 +243,24 @@ func request_FlowCombination_RpcBodyRpc_3(ctx 
context.Context, marshaler runtime val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -276,11 +277,11 @@ func request_FlowCombination_RpcBodyRpc_4(ctx context.Context, marshaler runtime var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyRpc_4); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -297,7 +298,7 @@ func request_FlowCombination_RpcBodyRpc_5(ctx context.Context, marshaler runtime var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -309,17 +310,17 @@ func request_FlowCombination_RpcBodyRpc_5(ctx context.Context, marshaler runtime val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyRpc_5); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -344,17 +345,17 @@ func request_FlowCombination_RpcBodyRpc_6(ctx context.Context, marshaler runtime val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), 
filter_FlowCombination_RpcBodyRpc_6); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcBodyRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -379,17 +380,17 @@ func request_FlowCombination_RpcPathSingleNestedRpc_0(ctx context.Context, marsh val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathSingleNestedRpc_0); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcPathSingleNestedRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -406,7 +407,7 @@ func request_FlowCombination_RpcPathNestedRpc_0(ctx context.Context, marshaler r var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -418,28 +419,28 @@ func request_FlowCombination_RpcPathNestedRpc_0(ctx context.Context, marshaler r val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedRpc_0); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcPathNestedRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -464,17 +465,17 @@ func request_FlowCombination_RpcPathNestedRpc_1(ctx context.Context, marshaler r val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := 
runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedRpc_1); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcPathNestedRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -491,7 +492,7 @@ func request_FlowCombination_RpcPathNestedRpc_2(ctx context.Context, marshaler r var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -503,17 +504,17 @@ func request_FlowCombination_RpcPathNestedRpc_2(ctx context.Context, marshaler r val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedRpc_2); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.RpcPathNestedRpc(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -526,7 +527,7 @@ func request_FlowCombination_RpcBodyStream_0(ctx context.Context, marshaler runt var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -555,35 +556,35 @@ func request_FlowCombination_RpcBodyStream_1(ctx context.Context, marshaler runt val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } val, ok = pathParams["c"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "c") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "c") } protoReq.C, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "c", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -608,7 +609,7 @@ func 
request_FlowCombination_RpcBodyStream_2(ctx context.Context, marshaler runt var metadata runtime.ServerMetadata if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyStream_2); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -629,7 +630,7 @@ func request_FlowCombination_RpcBodyStream_3(ctx context.Context, marshaler runt var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -641,24 +642,24 @@ func request_FlowCombination_RpcBodyStream_3(ctx context.Context, marshaler runt val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -683,11 +684,11 @@ func request_FlowCombination_RpcBodyStream_4(ctx context.Context, marshaler runt var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyStream_4); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -712,7 +713,7 @@ func request_FlowCombination_RpcBodyStream_5(ctx context.Context, marshaler runt var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -724,17 +725,17 @@ func request_FlowCombination_RpcBodyStream_5(ctx context.Context, marshaler runt val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyStream_5); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -767,17 +768,17 @@ func request_FlowCombination_RpcBodyStream_6(ctx context.Context, marshaler runt val, ok = pathParams["a"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a") } protoReq.A, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcBodyStream_6); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcBodyStream(ctx, &protoReq) @@ -810,17 +811,17 @@ func request_FlowCombination_RpcPathSingleNestedStream_0(ctx context.Context, ma val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathSingleNestedStream_0); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcPathSingleNestedStream(ctx, &protoReq) @@ -845,7 +846,7 @@ func request_FlowCombination_RpcPathNestedStream_0(ctx context.Context, marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -857,28 +858,28 @@ func request_FlowCombination_RpcPathNestedStream_0(ctx context.Context, marshale val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } val, ok = pathParams["b"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "b") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "b") } protoReq.B, err = runtime.String(val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "b", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedStream_0); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcPathNestedStream(ctx, &protoReq) @@ -911,17 +912,17 @@ func 
request_FlowCombination_RpcPathNestedStream_1(ctx context.Context, marshale val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedStream_1); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcPathNestedStream(ctx, &protoReq) @@ -946,7 +947,7 @@ func request_FlowCombination_RpcPathNestedStream_2(ctx context.Context, marshale var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq.C); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } var ( @@ -958,17 +959,17 @@ func request_FlowCombination_RpcPathNestedStream_2(ctx context.Context, marshale val, ok = pathParams["a.str"] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "a.str") } err = runtime.PopulateFieldFromPath(&protoReq, "a.str", val) if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "a.str", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_FlowCombination_RpcPathNestedStream_2); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.RpcPathNestedStream(ctx, &protoReq) @@ -1012,7 +1013,15 @@ func RegisterFlowCombinationHandlerFromEndpoint(ctx context.Context, mux *runtim // RegisterFlowCombinationHandler registers the http handlers for service FlowCombination to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewFlowCombinationClient(conn) + return RegisterFlowCombinationHandlerClient(ctx, mux, NewFlowCombinationClient(conn)) +} + +// RegisterFlowCombinationHandler registers the http handlers for service FlowCombination to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "FlowCombinationClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "FlowCombinationClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "FlowCombinationClient" to call the correct interceptors. 
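The hunk above splits registration so that RegisterFlowCombinationHandler now only builds a client from the *grpc.ClientConn and delegates to the new RegisterFlowCombinationHandlerClient. A minimal sketch (not part of the patch) of how a caller might use the new entry point directly; the backend address, listen port, dial options and import alias are illustrative assumptions:

package main

import (
	"log"
	"net/http"

	examplepb "github.com/grpc-ecosystem/grpc-gateway/examples/examplepb"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()

	// Backend address and dial options are assumptions for this sketch.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	mux := runtime.NewServeMux()
	// Any FlowCombinationClient implementation can be passed here, e.g. one that
	// wraps the generated client, since interceptors are no longer run for it
	// unless it goes through the normal gRPC client path.
	if err := examplepb.RegisterFlowCombinationHandlerClient(ctx, mux, examplepb.NewFlowCombinationClient(conn)); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}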
+func RegisterFlowCombinationHandlerClient(ctx context.Context, mux *runtime.ServeMux, client FlowCombinationClient) error { mux.Handle("POST", pattern_FlowCombination_RpcEmptyRpc_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1027,18 +1036,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcEmptyRpc_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcEmptyRpc_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcEmptyRpc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1055,18 +1065,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcEmptyStream_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcEmptyStream_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcEmptyStream_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1083,18 +1094,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_StreamEmptyRpc_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_StreamEmptyRpc_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_StreamEmptyRpc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1111,18 +1123,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_StreamEmptyStream_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_StreamEmptyStream_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_StreamEmptyStream_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1139,18 +1152,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1167,18 +1181,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_1(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_1(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1195,18 +1210,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_2(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_2(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_2(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1223,18 +1239,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_3(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_3(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_3(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1251,18 +1268,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_4(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_4(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_4(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1279,18 +1297,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_5(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_5(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_5(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1307,18 +1326,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyRpc_6(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyRpc_6(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyRpc_6(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1335,18 +1355,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathSingleNestedRpc_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathSingleNestedRpc_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathSingleNestedRpc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1363,18 +1384,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedRpc_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedRpc_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedRpc_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1391,18 +1413,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedRpc_1(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedRpc_1(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedRpc_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -1419,18 +1442,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedRpc_2(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedRpc_2(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedRpc_2(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1447,18 +1471,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1475,18 +1500,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_1(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_1(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_1(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1503,18 +1529,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_2(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_2(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_2(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) @@ -1531,18 +1558,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_3(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_3(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_3(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1559,18 +1587,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_4(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_4(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_4(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1587,18 +1616,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_5(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_5(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_5(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) @@ -1615,18 +1645,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcBodyStream_6(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcBodyStream_6(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcBodyStream_6(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1643,18 +1674,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathSingleNestedStream_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathSingleNestedStream_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathSingleNestedStream_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1671,18 +1703,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedStream_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedStream_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedStream_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) @@ -1699,18 +1732,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedStream_1(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedStream_1(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedStream_1(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -1727,18 +1761,19 @@ func RegisterFlowCombinationHandler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_FlowCombination_RpcPathNestedStream_2(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_FlowCombination_RpcPathNestedStream_2(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_FlowCombination_RpcPathNestedStream_2(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.go index 9bc4de296f16..71066ccd9646 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.go @@ -1,13 +1,12 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/examplepb/stream.proto -// DO NOT EDIT! 
package examplepb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import _ "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" +import _ "google.golang.org/genproto/googleapis/api/annotations" import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" import grpc_gateway_examples_sub "github.com/grpc-ecosystem/grpc-gateway/examples/sub" @@ -256,7 +255,7 @@ func init() { proto.RegisterFile("examples/examplepb/stream.proto", fileDescript var fileDescriptor2 = []byte{ // 314 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x90, 0xbf, 0x4a, 0x43, 0x31, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0xbf, 0x4a, 0x43, 0x31, 0x14, 0xc6, 0xb9, 0x2a, 0xa2, 0x11, 0x97, 0x0c, 0x0e, 0x51, 0x28, 0x16, 0xc1, 0x2a, 0x92, 0xb4, 0xba, 0xb9, 0x59, 0xe9, 0xa6, 0x38, 0x74, 0x73, 0x29, 0xc9, 0xe5, 0x34, 0x0d, 0xbd, 0xf7, 0x26, 0x24, 0xe7, 0x56, 0x0b, 0x4e, 0x8e, 0xae, 0x7d, 0x11, 0xdf, 0xc5, 0x57, 0xf0, 0x41, 0xa4, 0xf7, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.gw.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.gw.go index f256228759af..91a9065f4bd9 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.gw.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/examplepb/stream.pb.gw.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: examples/examplepb/stream.proto -// DO NOT EDIT! /* Package examplepb is a reverse proxy. @@ -22,10 +21,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray @@ -45,7 +46,7 @@ func request_StreamService_BulkCreate_0(ctx context.Context, marshaler runtime.M } if err != nil { grpclog.Printf("Failed to decode request: %v", err) - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err = stream.Send(&protoReq); err != nil { grpclog.Printf("Failed to send request: %v", err) @@ -167,7 +168,15 @@ func RegisterStreamServiceHandlerFromEndpoint(ctx context.Context, mux *runtime. // RegisterStreamServiceHandler registers the http handlers for service StreamService to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterStreamServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewStreamServiceClient(conn) + return RegisterStreamServiceHandlerClient(ctx, mux, NewStreamServiceClient(conn)) +} + +// RegisterStreamServiceHandler registers the http handlers for service StreamService to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "StreamServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "StreamServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "StreamServiceClient" to call the correct interceptors. 
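The repeated grpc.Errorf → status.Errorf rewrites in these generated handlers track grpc-go 1.7, which deprecates grpc.Errorf in favour of the status package. A small standalone sketch of the replacement API (the log format is illustrative, not taken from the patch):

package main

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// status.Errorf builds an error that carries a gRPC status code, which is
	// what the deprecated grpc.Errorf used to return.
	err := status.Errorf(codes.InvalidArgument, "missing parameter %s", "a")

	// The code and message can be recovered on the caller side.
	if st, ok := status.FromError(err); ok {
		log.Printf("code=%s message=%q", st.Code(), st.Message())
	}
}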
+func RegisterStreamServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client StreamServiceClient) error { mux.Handle("POST", pattern_StreamService_BulkCreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -182,18 +191,19 @@ func RegisterStreamServiceHandler(ctx context.Context, mux *runtime.ServeMux, co }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_StreamService_BulkCreate_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_StreamService_BulkCreate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_StreamService_BulkCreate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) @@ -210,18 +220,19 @@ func RegisterStreamServiceHandler(ctx context.Context, mux *runtime.ServeMux, co }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_StreamService_List_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_StreamService_List_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_StreamService_List_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) @@ -238,18 +249,19 @@ func RegisterStreamServiceHandler(ctx context.Context, mux *runtime.ServeMux, co }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_StreamService_BulkEcho_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_StreamService_BulkEcho_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_StreamService_BulkEcho_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/integration_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/integration_test.go index ba011497b6a4..44a9fa7da210 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/integration_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/integration_test.go @@ -36,7 +36,7 @@ func TestEcho(t *testing.T) { } testEcho(t, 8080, "application/json") - testEchoBody(t) + testEchoBody(t, 8080) } func TestForwardResponseOption(t *testing.T) { @@ -92,7 +92,7 @@ func testEcho(t *testing.T, port int, contentType string) { } } -func testEchoBody(t *testing.T) { +func testEchoBody(t *testing.T, port int) { sent := gw.SimpleMessage{Id: "example"} var m jsonpb.Marshaler payload, err := m.MarshalToString(&sent) @@ -100,7 +100,7 @@ func testEchoBody(t *testing.T) { t.Fatalf("m.MarshalToString(%#v) failed with %v; want success", payload, err) } - url := "http://localhost:8080/v1/example/echo_body" + url := fmt.Sprintf("http://localhost:%d/v1/example/echo_body", port) resp, err := http.Post(url, "", strings.NewReader(payload)) if err != nil { t.Errorf("http.Post(%q) failed with %v; want success", url, err) @@ -128,10 +128,10 @@ func testEchoBody(t *testing.T) { } if got, want := resp.Header.Get("Grpc-Metadata-Foo"), "foo1"; got != want { - t.Errorf("Grpc-Header-Foo was %q, wanted %q", got, want) + t.Errorf("Grpc-Metadata-Foo was %q, wanted %q", got, want) } if got, want := resp.Header.Get("Grpc-Metadata-Bar"), "bar1"; got != want { - t.Errorf("Grpc-Header-Bar was %q, wanted %q", got, want) + t.Errorf("Grpc-Metadata-Bar was %q, wanted %q", got, want) } if got, want := resp.Trailer.Get("Grpc-Trailer-Foo"), "foo2"; got != want { @@ -148,18 +148,18 @@ func TestABE(t *testing.T) { return } - testABECreate(t) - testABECreateBody(t) - testABEBulkCreate(t) - testABELookup(t) - testABELookupNotFound(t) - testABEList(t) - testABEBulkEcho(t) - testABEBulkEchoZeroLength(t) - testAdditionalBindings(t) + testABECreate(t, 8080) + testABECreateBody(t, 8080) + testABEBulkCreate(t, 8080) + testABELookup(t, 8080) + testABELookupNotFound(t, 8080) + testABEList(t, 8080) + testABEBulkEcho(t, 8080) + testABEBulkEchoZeroLength(t, 8080) + testAdditionalBindings(t, 8080) } -func testABECreate(t *testing.T) { +func testABECreate(t *testing.T, port int) { want := gw.ABitOfEverything{ FloatValue: 1.5, DoubleValue: 2.5, @@ -177,7 +177,7 @@ func testABECreate(t *testing.T) { Sint64Value: 4611686018427387903, NonConventionalNameValue: "camelCase", } - url := fmt.Sprintf("http://localhost:8080/v1/example/a_bit_of_everything/%f/%f/%d/separator/%d/%d/%d/%d/%v/%s/%d/%d/%d/%d/%d/%s", want.FloatValue, want.DoubleValue, want.Int64Value, want.Uint64Value, want.Int32Value, want.Fixed64Value, want.Fixed32Value, want.BoolValue, want.StringValue, want.Uint32Value, want.Sfixed32Value, want.Sfixed64Value, want.Sint32Value, want.Sint64Value, want.NonConventionalNameValue) + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything/%f/%f/%d/separator/%d/%d/%d/%d/%v/%s/%d/%d/%d/%d/%d/%s", port, want.FloatValue, want.DoubleValue, want.Int64Value, want.Uint64Value, want.Int32Value, want.Fixed64Value, want.Fixed32Value, want.BoolValue, want.StringValue, want.Uint32Value, want.Sfixed32Value, want.Sfixed64Value, want.Sint32Value, want.Sint64Value, want.NonConventionalNameValue) resp, err := http.Post(url, "application/json", strings.NewReader("{}")) if err != nil { @@ -210,7 +210,7 @@ func testABECreate(t *testing.T) { } } -func 
testABECreateBody(t *testing.T) { +func testABECreateBody(t *testing.T, port int) { want := gw.ABitOfEverything{ FloatValue: 1.5, DoubleValue: 2.5, @@ -255,7 +255,7 @@ func testABECreateBody(t *testing.T) { "b": {Name: "y", Amount: 2}, }, } - url := "http://localhost:8080/v1/example/a_bit_of_everything" + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything", port) var m jsonpb.Marshaler payload, err := m.MarshalToString(&want) if err != nil { @@ -293,7 +293,7 @@ func testABECreateBody(t *testing.T) { } } -func testABEBulkCreate(t *testing.T) { +func testABEBulkCreate(t *testing.T, port int) { count := 0 r, w := io.Pipe() go func(w io.WriteCloser) { @@ -344,7 +344,7 @@ func testABEBulkCreate(t *testing.T) { count++ } }(w) - url := "http://localhost:8080/v1/example/a_bit_of_everything/bulk" + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything/bulk", port) resp, err := http.Post(url, "application/json", r) if err != nil { t.Errorf("http.Post(%q) failed with %v; want success", url, err) @@ -369,7 +369,7 @@ func testABEBulkCreate(t *testing.T) { } if got, want := resp.Header.Get("Grpc-Metadata-Count"), fmt.Sprintf("%d", count); got != want { - t.Errorf("Grpc-Header-Count was %q, wanted %q", got, want) + t.Errorf("Grpc-Metadata-Count was %q, wanted %q", got, want) } if got, want := resp.Trailer.Get("Grpc-Trailer-Foo"), "foo2"; got != want { @@ -380,8 +380,8 @@ func testABEBulkCreate(t *testing.T) { } } -func testABELookup(t *testing.T) { - url := "http://localhost:8080/v1/example/a_bit_of_everything" +func testABELookup(t *testing.T, port int) { + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything", port) cresp, err := http.Post(url, "application/json", strings.NewReader(` {"bool_value": true, "string_value": "strprefix/example"} `)) @@ -435,8 +435,8 @@ func testABELookup(t *testing.T) { } } -func testABELookupNotFound(t *testing.T) { - url := "http://localhost:8080/v1/example/a_bit_of_everything" +func testABELookupNotFound(t *testing.T, port int) { + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything", port) uuid := "not_exist" url = fmt.Sprintf("%s/%s", url, uuid) resp, err := http.Get(url) @@ -469,6 +469,11 @@ func testABELookupNotFound(t *testing.T) { return } + if got, want := msg.Error, "not found"; got != want { + t.Errorf("msg.Error = %s; want %s", got, want) + return + } + if got, want := resp.Header.Get("Grpc-Metadata-Uuid"), uuid; got != want { t.Errorf("Grpc-Metadata-Uuid was %s, wanted %s", got, want) } @@ -480,8 +485,8 @@ func testABELookupNotFound(t *testing.T) { } } -func testABEList(t *testing.T) { - url := "http://localhost:8080/v1/example/a_bit_of_everything" +func testABEList(t *testing.T, port int) { + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything", port) resp, err := http.Get(url) if err != nil { t.Errorf("http.Get(%q) failed with %v; want success", url, err) @@ -518,7 +523,7 @@ func testABEList(t *testing.T) { value := resp.Header.Get("Grpc-Metadata-Count") if value == "" { - t.Errorf("Grpc-Header-Count should not be empty") + t.Errorf("Grpc-Metadata-Count should not be empty") } count, err := strconv.Atoi(value) @@ -531,7 +536,7 @@ func testABEList(t *testing.T) { } } -func testABEBulkEcho(t *testing.T) { +func testABEBulkEcho(t *testing.T, port int) { reqr, reqw := io.Pipe() var wg sync.WaitGroup var want []*sub.StringMessage @@ -555,7 +560,7 @@ func testABEBulkEcho(t *testing.T) { } }() - url := "http://localhost:8080/v1/example/a_bit_of_everything/echo" + url := 
fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything/echo", port) req, err := http.NewRequest("POST", url, reqr) if err != nil { t.Errorf("http.NewRequest(%q, %q, reqr) failed with %v; want success", "POST", url, err) @@ -609,8 +614,8 @@ func testABEBulkEcho(t *testing.T) { } } -func testABEBulkEchoZeroLength(t *testing.T) { - url := "http://localhost:8080/v1/example/a_bit_of_everything/echo" +func testABEBulkEchoZeroLength(t *testing.T, port int) { + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything/echo", port) req, err := http.NewRequest("POST", url, bytes.NewReader(nil)) if err != nil { t.Errorf("http.NewRequest(%q, %q, bytes.NewReader(nil)) failed with %v; want success", "POST", url, err) @@ -641,10 +646,10 @@ func testABEBulkEchoZeroLength(t *testing.T) { } } -func testAdditionalBindings(t *testing.T) { +func testAdditionalBindings(t *testing.T, port int) { for i, f := range []func() *http.Response{ func() *http.Response { - url := "http://localhost:8080/v1/example/a_bit_of_everything/echo/hello" + url := fmt.Sprintf("http://localhost:%d/v1/example/a_bit_of_everything/echo/hello", port) resp, err := http.Get(url) if err != nil { t.Errorf("http.Get(%q) failed with %v; want success", url, err) @@ -653,7 +658,7 @@ func testAdditionalBindings(t *testing.T) { return resp }, func() *http.Response { - url := "http://localhost:8080/v2/example/echo" + url := fmt.Sprintf("http://localhost:%d/v2/example/echo", port) resp, err := http.Post(url, "application/json", strings.NewReader(`"hello"`)) if err != nil { t.Errorf("http.Post(%q, %q, %q) failed with %v; want success", url, "application/json", `"hello"`, err) @@ -662,7 +667,7 @@ func testAdditionalBindings(t *testing.T) { return resp }, func() *http.Response { - url := "http://localhost:8080/v2/example/echo?value=hello" + url := fmt.Sprintf("http://localhost:%d/v2/example/echo?value=hello", port) resp, err := http.Get(url) if err != nil { t.Errorf("http.Get(%q) failed with %v; want success", url, err) @@ -717,3 +722,63 @@ func TestTimeout(t *testing.T) { t.Errorf("resp.StatusCode = %d; want %d", got, want) } } + +func TestUnknownPath(t *testing.T) { + url := "http://localhost:8080" + resp, err := http.Post(url, "application/json", strings.NewReader("{}")) + if err != nil { + t.Errorf("http.Post(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("iotuil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusNotFound; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + } +} + +func TestMethodNotAllowed(t *testing.T) { + url := "http://localhost:8080/v1/example/echo/myid" + resp, err := http.Get(url) + if err != nil { + t.Errorf("http.Post(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("iotuil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusMethodNotAllowed; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + } +} + +func TestInvalidArgument(t *testing.T) { + url := "http://localhost:8080/v1/example/echo/myid/not_int64" + resp, err := http.Get(url) + if err != nil { + t.Errorf("http.Get(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) 
+ if err != nil { + t.Errorf("iotuil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusBadRequest; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/main.go index 748ab36186b5..f6a16a808ce6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/main.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/main.go @@ -96,8 +96,7 @@ func Run(address string, opts ...runtime.ServeMuxOption) error { } mux.Handle("/", gw) - http.ListenAndServe(address, allowCORS(mux)) - return nil + return http.ListenAndServe(address, allowCORS(mux)) } func main() { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/proto_error_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/proto_error_test.go new file mode 100644 index 000000000000..de3b638f7366 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/proto_error_test.go @@ -0,0 +1,170 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/jsonpb" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +func TestWithProtoErrorHandler(t *testing.T) { + go func() { + if err := Run( + ":8082", + runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler), + ); err != nil { + t.Errorf("gw.Run() failed with %v; want success", err) + return + } + }() + + time.Sleep(100 * time.Millisecond) + testEcho(t, 8082, "application/json") + testEchoBody(t, 8082) +} + +func TestABEWithProtoErrorHandler(t *testing.T) { + if testing.Short() { + t.Skip() + return + } + + testABECreate(t, 8082) + testABECreateBody(t, 8082) + testABEBulkCreate(t, 8082) + testABELookup(t, 8082) + testABELookupNotFoundWithProtoError(t) + testABEList(t, 8082) + testABEBulkEcho(t, 8082) + testABEBulkEchoZeroLength(t, 8082) + testAdditionalBindings(t, 8082) +} + +func testABELookupNotFoundWithProtoError(t *testing.T) { + url := "http://localhost:8082/v1/example/a_bit_of_everything" + uuid := "not_exist" + url = fmt.Sprintf("%s/%s", url, uuid) + resp, err := http.Get(url) + if err != nil { + t.Errorf("http.Get(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("ioutil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusNotFound; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + return + } + + var msg spb.Status + if err := jsonpb.UnmarshalString(string(buf), &msg); err != nil { + t.Errorf("jsonpb.UnmarshalString(%s, &msg) failed with %v; want success", buf, err) + return + } + + if got, want := msg.Code, int32(codes.NotFound); got != want { + t.Errorf("msg.Code = %d; want %d", got, want) + return + } + + if got, want := msg.Message, "not found"; got != want { + t.Errorf("msg.Message = %s; want %s", got, want) + return + } + + if got, want := resp.Header.Get("Grpc-Metadata-Uuid"), uuid; got != want { + t.Errorf("Grpc-Metadata-Uuid was %s, wanted %s", got, want) + } + if got, want := resp.Trailer.Get("Grpc-Trailer-Foo"), "foo2"; got != want { + t.Errorf("Grpc-Trailer-Foo was %q, wanted %q", got, want) + } + if got, want := 
resp.Trailer.Get("Grpc-Trailer-Bar"), "bar2"; got != want { + t.Errorf("Grpc-Trailer-Bar was %q, wanted %q", got, want) + } +} + +func TestUnknownPathWithProtoError(t *testing.T) { + url := "http://localhost:8082" + resp, err := http.Post(url, "application/json", strings.NewReader("{}")) + if err != nil { + t.Errorf("http.Post(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("iotuil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusNotImplemented; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + } + + var msg spb.Status + if err := jsonpb.UnmarshalString(string(buf), &msg); err != nil { + t.Errorf("jsonpb.UnmarshalString(%s, &msg) failed with %v; want success", buf, err) + return + } + + if got, want := msg.Code, int32(codes.Unimplemented); got != want { + t.Errorf("msg.Code = %d; want %d", got, want) + return + } + + if msg.Message == "" { + t.Errorf("msg.Message should not be empty") + return + } +} + +func TestMethodNotAllowedWithProtoError(t *testing.T) { + url := "http://localhost:8082/v1/example/echo/myid" + resp, err := http.Get(url) + if err != nil { + t.Errorf("http.Post(%q) failed with %v; want success", url, err) + return + } + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("iotuil.ReadAll(resp.Body) failed with %v; want success", err) + return + } + + if got, want := resp.StatusCode, http.StatusNotImplemented; got != want { + t.Errorf("resp.StatusCode = %d; want %d", got, want) + t.Logf("%s", buf) + } + + var msg spb.Status + if err := jsonpb.UnmarshalString(string(buf), &msg); err != nil { + t.Errorf("jsonpb.UnmarshalString(%s, &msg) failed with %v; want success", buf, err) + return + } + + if got, want := msg.Code, int32(codes.Unimplemented); got != want { + t.Errorf("msg.Code = %d; want %d", got, want) + return + } + + if msg.Message == "" { + t.Errorf("msg.Message should not be empty") + return + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/server/a_bit_of_everything.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/server/a_bit_of_everything.go index 8c1badd95ee1..38c6467c54e6 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/server/a_bit_of_everything.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/server/a_bit_of_everything.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/golang/glog" + "github.com/golang/protobuf/ptypes/duration" "github.com/golang/protobuf/ptypes/empty" examples "github.com/grpc-ecosystem/grpc-gateway/examples/examplepb" sub "github.com/grpc-ecosystem/grpc-gateway/examples/sub" @@ -15,6 +16,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // Implements of ABitOfEverythingServiceServer @@ -111,7 +113,7 @@ func (s *_ABitOfEverythingServer) Lookup(ctx context.Context, msg *sub2.IdMessag "foo": "foo2", "bar": "bar2", })) - return nil, grpc.Errorf(codes.NotFound, "not found") + return nil, status.Errorf(codes.NotFound, "not found") } func (s *_ABitOfEverythingServer) List(_ *empty.Empty, stream examples.StreamService_ListServer) error { @@ -132,13 +134,13 @@ func (s *_ABitOfEverythingServer) List(_ *empty.Empty, stream examples.StreamSer } // return error when metadata includes error header - if header, ok := 
metadata.FromContext(stream.Context()); ok { + if header, ok := metadata.FromIncomingContext(stream.Context()); ok { if v, ok := header["error"]; ok { stream.SetTrailer(metadata.New(map[string]string{ "foo": "foo2", "bar": "bar2", })) - return grpc.Errorf(codes.InvalidArgument, "error metadata: %v", v) + return status.Errorf(codes.InvalidArgument, "error metadata: %v", v) } } return nil @@ -152,7 +154,7 @@ func (s *_ABitOfEverythingServer) Update(ctx context.Context, msg *examples.ABit if _, ok := s.v[msg.Uuid]; ok { s.v[msg.Uuid] = msg } else { - return nil, grpc.Errorf(codes.NotFound, "not found") + return nil, status.Errorf(codes.NotFound, "not found") } return new(empty.Empty), nil } @@ -165,7 +167,20 @@ func (s *_ABitOfEverythingServer) Delete(ctx context.Context, msg *sub2.IdMessag if _, ok := s.v[msg.Uuid]; ok { delete(s.v, msg.Uuid) } else { - return nil, grpc.Errorf(codes.NotFound, "not found") + return nil, status.Errorf(codes.NotFound, "not found") + } + return new(empty.Empty), nil +} + +func (s *_ABitOfEverythingServer) GetQuery(ctx context.Context, msg *examples.ABitOfEverything) (*empty.Empty, error) { + s.m.Lock() + defer s.m.Unlock() + + glog.Info(msg) + if _, ok := s.v[msg.Uuid]; ok { + s.v[msg.Uuid] = msg + } else { + return nil, status.Errorf(codes.NotFound, "not found") } return new(empty.Empty), nil } @@ -221,6 +236,10 @@ func (s *_ABitOfEverythingServer) DeepPathEcho(ctx context.Context, msg *example return msg, nil } +func (s *_ABitOfEverythingServer) NoBindings(ctx context.Context, msg *duration.Duration) (*empty.Empty, error) { + return nil, nil +} + func (s *_ABitOfEverythingServer) Timeout(ctx context.Context, msg *empty.Empty) (*empty.Empty, error) { select { case <-ctx.Done(): diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub/message.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub/message.pb.go index 58a9be92c13f..9faad923a578 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub/message.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub/message.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/sub/message.proto -// DO NOT EDIT! /* Package sub is a generated protocol buffer package. @@ -53,7 +52,7 @@ func init() { proto.RegisterFile("examples/sub/message.proto", fileDescriptor0) var fileDescriptor0 = []byte{ // 111 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0xad, 0x48, 0xcc, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0xcf, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4c, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0x4f, 0x2c, 0x49, 0x2d, 0x4f, 0xac, 0xd4, 0x83, 0x29, 0xd4, 0x2b, 0x2e, 0x4d, 0x52, 0x52, 0xe5, 0xe2, 0x0d, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub2/message.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub2/message.pb.go index 37d6ee759fb3..710d9525101f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub2/message.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/examples/sub2/message.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: examples/sub2/message.proto -// DO NOT EDIT! /* Package sub2 is a generated protocol buffer package. 
@@ -37,6 +36,13 @@ func (m *IdMessage) String() string { return proto.CompactTextString( func (*IdMessage) ProtoMessage() {} func (*IdMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *IdMessage) GetUuid() string { + if m != nil { + return m.Uuid + } + return "" +} + func init() { proto.RegisterType((*IdMessage)(nil), "sub2.IdMessage") } @@ -45,7 +51,7 @@ func init() { proto.RegisterFile("examples/sub2/message.proto", fileDescriptor0) var fileDescriptor0 = []byte{ // 128 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xad, 0x48, 0xcc, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0xd6, 0x2f, 0x2e, 0x4d, 0x32, 0xd2, 0xcf, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x01, 0x89, 0x29, 0xc9, 0x73, 0x71, 0x7a, 0xa6, 0xf8, 0x42, 0x24, 0x84, 0x84, 0xb8, 0x58, 0x4a, 0x4b, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go index f293f5558241..9e4f3428dd30 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry.go @@ -30,6 +30,9 @@ type Registry struct { // pkgAliases is a mapping from package aliases to package paths in go which are already taken. pkgAliases map[string]string + + // allowDeleteBody permits http delete methods to have a body + allowDeleteBody bool } // NewRegistry returns a new Registry. @@ -204,7 +207,7 @@ func (r *Registry) AddPkgMap(file, protoPkg string) { r.pkgMap[file] = protoPkg } -// SetPrefix registeres the perfix to be added to go package paths generated from proto package names. +// SetPrefix registers the prefix to be added to go package paths generated from proto package names. func (r *Registry) SetPrefix(prefix string) { r.prefix = prefix } @@ -236,6 +239,9 @@ func (r *Registry) goPackagePath(f *descriptor.FileDescriptorProto) string { gopkg := f.Options.GetGoPackage() idx := strings.LastIndex(gopkg, "/") if idx >= 0 { + if sc := strings.LastIndex(gopkg, ";"); sc > 0 { + gopkg = gopkg[:sc+1-1] + } return gopkg } @@ -260,11 +266,25 @@ func (r *Registry) GetAllFQENs() []string { return keys } +// SetAllowDeleteBody controls whether http delete methods may have a +// body or fail loading if encountered. +func (r *Registry) SetAllowDeleteBody(allow bool) { + r.allowDeleteBody = allow +} + +// sanitizePackageName replaces unallowed character in package name +// with allowed character. +func sanitizePackageName(pkgName string) string { + pkgName = strings.Replace(pkgName, ".", "_", -1) + pkgName = strings.Replace(pkgName, "-", "_", -1) + return pkgName +} + // defaultGoPackageName returns the default go package name to be used for go files generated from "f". // You might need to use an unique alias for the package when you import it. Use ReserveGoPackageAlias to get a unique alias. func defaultGoPackageName(f *descriptor.FileDescriptorProto) string { name := packageIdentityName(f) - return strings.Replace(name, ".", "_", -1) + return sanitizePackageName(name) } // packageIdentityName returns the identity of packages. 
@@ -275,10 +295,18 @@ func packageIdentityName(f *descriptor.FileDescriptorProto) string { gopkg := f.Options.GetGoPackage() idx := strings.LastIndex(gopkg, "/") if idx < 0 { - return gopkg + gopkg = gopkg[idx+1:] } - return gopkg[idx+1:] + gopkg = gopkg[idx+1:] + // package name is overrided with the string after the + // ';' character + sc := strings.IndexByte(gopkg, ';') + if sc < 0 { + return sanitizePackageName(gopkg) + + } + return sanitizePackageName(gopkg[sc+1:]) } if f.Package == nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry_test.go index ff3f4dc17992..b73c967b2959 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/registry_test.go @@ -531,3 +531,21 @@ func TestLoadWithInconsistentTargetPackage(t *testing.T) { } } } + +func TestLoadOverridedPackageName(t *testing.T) { + reg := NewRegistry() + loadFile(t, reg, ` + name: 'example.proto' + package: 'example' + options < go_package: 'example.com/xyz;pb' > + `) + file := reg.files["example.proto"] + if file == nil { + t.Errorf("reg.files[%q] = nil; want non-nil", "example.proto") + return + } + wantPkg := GoPackage{Path: "example.com/xyz", Name: "pb"} + if got, want := file.GoPkg, wantPkg; got != want { + t.Errorf("file.GoPkg = %#v; want %#v", got, want) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go index 8f7cb43c669e..c9dfec8e0405 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services.go @@ -8,7 +8,7 @@ import ( "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule" - options "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" + options "google.golang.org/genproto/googleapis/api/annotations" ) // loadServices registers services and their methods from "targetFile" to "r". 
@@ -31,8 +31,7 @@ func (r *Registry) loadServices(file *File) error { return err } if opts == nil { - glog.V(1).Infof("Skip non-target method: %s.%s", svc.GetName(), md.GetName()) - continue + glog.V(1).Infof("Found non-target method: %s.%s", svc.GetName(), md.GetName()) } meth, err := r.newMethod(svc, md, opts) if err != nil { @@ -90,7 +89,7 @@ func (r *Registry) newMethod(svc *Service, md *descriptor.MethodDescriptorProto, case opts.GetDelete() != "": httpMethod = "DELETE" pathTemplate = opts.GetDelete() - if opts.Body != "" { + if opts.Body != "" && !r.allowDeleteBody { return nil, fmt.Errorf("needs request body even though http method is DELETE: %s", md.GetName()) } @@ -104,8 +103,8 @@ func (r *Registry) newMethod(svc *Service, md *descriptor.MethodDescriptorProto, pathTemplate = custom.Path default: - glog.Errorf("No pattern specified in google.api.HttpRule: %s", md.GetName()) - return nil, fmt.Errorf("none of pattern specified") + glog.V(1).Infof("No pattern specified in google.api.HttpRule: %s", md.GetName()) + return nil, nil } parsed, err := httprule.Parse(pathTemplate) @@ -147,7 +146,9 @@ func (r *Registry) newMethod(svc *Service, md *descriptor.MethodDescriptorProto, return nil, err } - meth.Bindings = append(meth.Bindings, b) + if b != nil { + meth.Bindings = append(meth.Bindings, b) + } for i, additional := range opts.GetAdditionalBindings() { if len(additional.AdditionalBindings) > 0 { return nil, fmt.Errorf("additional_binding in additional_binding not allowed: %s.%s", svc.GetName(), meth.GetName()) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services_test.go index 7b4af4e68988..eda34d4141e4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor/services_test.go @@ -1107,3 +1107,104 @@ func TestResolveFieldPath(t *testing.T) { } } } + +func TestExtractServicesWithDeleteBody(t *testing.T) { + for _, spec := range []struct { + allowDeleteBody bool + expectErr bool + target string + srcs []string + }{ + // body for DELETE, but registry configured to allow it + { + allowDeleteBody: true, + expectErr: false, + target: "path/to/example.proto", + srcs: []string{ + ` + name: "path/to/example.proto", + package: "example" + message_type < + name: "StringMessage" + field < + name: "string" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + > + > + service < + name: "ExampleService" + method < + name: "RemoveResource" + input_type: "StringMessage" + output_type: "StringMessage" + options < + [google.api.http] < + delete: "/v1/example/resource" + body: "string" + > + > + > + > + `, + }, + }, + // body for DELETE, registry configured not to allow it + { + allowDeleteBody: false, + expectErr: true, + target: "path/to/example.proto", + srcs: []string{ + ` + name: "path/to/example.proto", + package: "example" + message_type < + name: "StringMessage" + field < + name: "string" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + > + > + service < + name: "ExampleService" + method < + name: "RemoveResource" + input_type: "StringMessage" + output_type: "StringMessage" + options < + [google.api.http] < + delete: "/v1/example/resource" + body: "string" + > + > + > + > + `, + }, + }, + } { + reg := NewRegistry() + reg.SetAllowDeleteBody(spec.allowDeleteBody) + + var fds 
[]*descriptor.FileDescriptorProto + for _, src := range spec.srcs { + var fd descriptor.FileDescriptorProto + if err := proto.UnmarshalText(src, &fd); err != nil { + t.Fatalf("proto.UnmarshalText(%s, &fd) failed with %v; want success", src, err) + } + reg.loadFile(&fd) + fds = append(fds, &fd) + } + err := reg.loadServices(reg.files[spec.target]) + if spec.expectErr && err == nil { + t.Errorf("loadServices(%q) succeeded; want an error; allowDeleteBody=%v, files=%v", spec.target, spec.allowDeleteBody, spec.srcs) + } + if !spec.expectErr && err != nil { + t.Errorf("loadServices(%q) failed; do not want an error; allowDeleteBody=%v, files=%v", spec.target, spec.allowDeleteBody, spec.srcs) + } + t.Log(err) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go index b4cce8695eb0..d08609bc7290 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator.go @@ -13,6 +13,7 @@ import ( plugin "github.com/golang/protobuf/protoc-gen-go/plugin" "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor" gen "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator" + options "google.golang.org/genproto/googleapis/api/annotations" ) var ( @@ -20,12 +21,13 @@ var ( ) type generator struct { - reg *descriptor.Registry - baseImports []descriptor.GoPackage + reg *descriptor.Registry + baseImports []descriptor.GoPackage + useRequestContext bool } // New returns a new generator which generates grpc gateway files. -func New(reg *descriptor.Registry) gen.Generator { +func New(reg *descriptor.Registry, useRequestContext bool) gen.Generator { var imports []descriptor.GoPackage for _, pkgpath := range []string{ "io", @@ -37,6 +39,7 @@ func New(reg *descriptor.Registry) gen.Generator { "google.golang.org/grpc", "google.golang.org/grpc/codes", "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/status", } { pkg := descriptor.GoPackage{ Path: pkgpath, @@ -54,7 +57,7 @@ func New(reg *descriptor.Registry) gen.Generator { } imports = append(imports, pkg) } - return &generator{reg: reg, baseImports: imports} + return &generator{reg: reg, baseImports: imports, useRequestContext: useRequestContext} } func (g *generator) Generate(targets []*descriptor.File) ([]*plugin.CodeGeneratorResponse_File, error) { @@ -97,15 +100,13 @@ func (g *generator) generate(file *descriptor.File) (string, error) { for _, svc := range file.Services { for _, m := range svc.Methods { pkg := m.RequestType.File.GoPkg - if pkg == file.GoPkg { - continue - } - if pkgSeen[pkg.Path] { + if m.Options == nil || !proto.HasExtension(m.Options, options.E_Http) || + pkg == file.GoPkg || pkgSeen[pkg.Path] { continue } pkgSeen[pkg.Path] = true imports = append(imports, pkg) } } - return applyTemplate(param{File: file, Imports: imports}) + return applyTemplate(param{File: file, Imports: imports, UseRequestContext: g.useRequestContext}) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator_test.go new file mode 100644 index 000000000000..755a09236e27 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/generator_test.go @@ -0,0 +1,88 
@@ +package gengateway + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + protodescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor" +) + +func TestGenerateServiceWithoutBindings(t *testing.T) { + msgdesc := &protodescriptor.DescriptorProto{ + Name: proto.String("ExampleMessage"), + } + msg := &descriptor.Message{ + DescriptorProto: msgdesc, + } + msg1 := &descriptor.Message{ + DescriptorProto: msgdesc, + File: &descriptor.File{ + GoPkg: descriptor.GoPackage{ + Path: "github.com/golang/protobuf/ptypes/empty", + Name: "empty", + }, + }, + } + meth := &protodescriptor.MethodDescriptorProto{ + Name: proto.String("Example"), + InputType: proto.String("ExampleMessage"), + OutputType: proto.String("ExampleMessage"), + } + meth1 := &protodescriptor.MethodDescriptorProto{ + Name: proto.String("ExampleWithoutBindings"), + InputType: proto.String("empty.Empty"), + OutputType: proto.String("empty.Empty"), + } + svc := &protodescriptor.ServiceDescriptorProto{ + Name: proto.String("ExampleService"), + Method: []*protodescriptor.MethodDescriptorProto{meth, meth1}, + } + file := descriptor.File{ + FileDescriptorProto: &protodescriptor.FileDescriptorProto{ + Name: proto.String("example.proto"), + Package: proto.String("example"), + Dependency: []string{"a.example/b/c.proto", "a.example/d/e.proto"}, + MessageType: []*protodescriptor.DescriptorProto{msgdesc}, + Service: []*protodescriptor.ServiceDescriptorProto{svc}, + }, + GoPkg: descriptor.GoPackage{ + Path: "example.com/path/to/example/example.pb", + Name: "example_pb", + }, + Messages: []*descriptor.Message{msg}, + Services: []*descriptor.Service{ + { + ServiceDescriptorProto: svc, + Methods: []*descriptor.Method{ + { + MethodDescriptorProto: meth, + RequestType: msg, + ResponseType: msg, + Bindings: []*descriptor.Binding{ + { + HTTPMethod: "GET", + Body: &descriptor.Body{FieldPath: nil}, + }, + }, + }, + { + MethodDescriptorProto: meth1, + RequestType: msg1, + ResponseType: msg1, + }, + }, + }, + }, + } + g := &generator{} + got, err := g.generate(crossLinkFixture(&file)) + if err != nil { + t.Errorf("generate(%#v) failed with %v; want success", file, err) + return + } + if notwanted := `"github.com/golang/protobuf/ptypes/empty"`; strings.Contains(got, notwanted) { + t.Errorf("generate(%#v) = %s; does not want to contain %s", file, got, notwanted) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go index 3c3da539b956..c6e55c130f7c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway/template.go @@ -13,7 +13,8 @@ import ( type param struct { *descriptor.File - Imports []descriptor.GoPackage + Imports []descriptor.GoPackage + UseRequestContext bool } type binding struct { @@ -66,27 +67,43 @@ func (f queryParamFilter) String() string { return fmt.Sprintf("&utilities.DoubleArray{Encoding: map[string]int{%s}, Base: %#v, Check: %#v}", e, f.Base, f.Check) } +type trailerParams struct { + Services []*descriptor.Service + UseRequestContext bool +} + func applyTemplate(p param) (string, error) { w := bytes.NewBuffer(nil) if err := headerTemplate.Execute(w, p); err != nil { return "", err } - var methodSeen bool + var targetServices []*descriptor.Service for 
_, svc := range p.Services { + var methodWithBindingsSeen bool for _, meth := range svc.Methods { glog.V(2).Infof("Processing %s.%s", svc.GetName(), meth.GetName()) - methodSeen = true + methName := strings.Title(*meth.Name) + meth.Name = &methName for _, b := range meth.Bindings { + methodWithBindingsSeen = true if err := handlerTemplate.Execute(w, binding{Binding: b}); err != nil { return "", err } } } + if methodWithBindingsSeen { + targetServices = append(targetServices, svc) + } } - if !methodSeen { + if len(targetServices) == 0 { return "", errNoTargetService } - if err := trailerTemplate.Execute(w, p.Services); err != nil { + + tp := trailerParams{ + Services: targetServices, + UseRequestContext: p.UseRequestContext, + } + if err := trailerTemplate.Execute(w, tp); err != nil { return "", err } return w.String(), nil @@ -94,9 +111,8 @@ func applyTemplate(p param) (string, error) { var ( headerTemplate = template.Must(template.New("header").Parse(` -// Code generated by protoc-gen-grpc-gateway +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: {{.GetName}} -// DO NOT EDIT! /* Package {{.GoPkg.Name}} is a reverse proxy. @@ -112,6 +128,7 @@ import ( var _ codes.Code var _ io.Reader +var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray `)) @@ -150,7 +167,7 @@ func request_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}(ctx cont } if err != nil { grpclog.Printf("Failed to decode request: %v", err) - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err = stream.Send(&protoReq); err != nil { grpclog.Printf("Failed to send request: %v", err) @@ -189,7 +206,7 @@ var ( var metadata runtime.ServerMetadata {{if .Body}} if err := marshaler.NewDecoder(req.Body).Decode(&{{.Body.RHS "protoReq"}}); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } {{end}} {{if .PathParams}} @@ -202,7 +219,7 @@ var ( {{range $param := .PathParams}} val, ok = pathParams[{{$param | printf "%q"}}] if !ok { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "missing parameter %s", {{$param | printf "%q"}}) + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", {{$param | printf "%q"}}) } {{if $param.IsNestedProto3 }} err = runtime.PopulateFieldFromPath(&protoReq, {{$param | printf "%q"}}, val) @@ -210,13 +227,13 @@ var ( {{$param.RHS "protoReq"}}, err = {{$param.ConvertFuncExpr}}(val) {{end}} if err != nil { - return nil, metadata, err + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", {{$param | printf "%q"}}, err) } {{end}} {{end}} {{if .HasQueryParam}} if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_{{.Method.Service.GetName}}_{{.Method.GetName}}_{{.Index}}); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } {{end}} {{if .Method.GetServerStreaming}} @@ -291,7 +308,8 @@ var ( `)) trailerTemplate = template.Must(template.New("trailer").Parse(` -{{range $svc := .}} +{{$UseRequestContext := .UseRequestContext}} +{{range $svc := .Services}} // Register{{$svc.GetName}}HandlerFromEndpoint is same as Register{{$svc.GetName}}Handler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
func Register{{$svc.GetName}}HandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { @@ -320,11 +338,23 @@ func Register{{$svc.GetName}}HandlerFromEndpoint(ctx context.Context, mux *runti // Register{{$svc.GetName}}Handler registers the http handlers for service {{$svc.GetName}} to "mux". // The handlers forward requests to the grpc endpoint over "conn". func Register{{$svc.GetName}}Handler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := New{{$svc.GetName}}Client(conn) + return Register{{$svc.GetName}}HandlerClient(ctx, mux, New{{$svc.GetName}}Client(conn)) +} + +// Register{{$svc.GetName}}Handler registers the http handlers for service {{$svc.GetName}} to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "{{$svc.GetName}}Client". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "{{$svc.GetName}}Client" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "{{$svc.GetName}}Client" to call the correct interceptors. +func Register{{$svc.GetName}}HandlerClient(ctx context.Context, mux *runtime.ServeMux, client {{$svc.GetName}}Client) error { {{range $m := $svc.Methods}} {{range $b := $m.Bindings}} mux.Handle({{$b.HTTPMethod | printf "%q"}}, pattern_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + {{- if $UseRequestContext }} + ctx, cancel := context.WithCancel(req.Context()) + {{- else -}} ctx, cancel := context.WithCancel(ctx) + {{- end }} defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { @@ -336,20 +366,21 @@ func Register{{$svc.GetName}}Handler(ctx context.Context, mux *runtime.ServeMux, }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } resp, md, err := request_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } {{if $m.GetServerStreaming}} - forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) {{else}} - forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_{{$svc.GetName}}_{{$m.GetName}}_{{$b.Index}}(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
{{end}} }) {{end}} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go index 97122f204511..3be742685c66 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule/parse.go @@ -92,7 +92,7 @@ func tokenize(path string) (tokens []string, verb string) { return tokens, verb } -// parser is a parser of the template syntax defined in third_party/googleapis/google/api/httprule.proto. +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. type parser struct { tokens []string accepted []string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go index 1c01d8756ce8..cf365f249e16 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/main.go @@ -23,7 +23,9 @@ import ( ) var ( - importPrefix = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files") + importPrefix = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files") + useRequestContext = flag.Bool("request_context", false, "determine whether to use http.Request's context or not") + allowDeleteBody = flag.Bool("allow_delete_body", false, "unless set, HTTP DELETE methods may not have a body") ) func parseReq(r io.Reader) (*plugin.CodeGeneratorRequest, error) { @@ -73,9 +75,10 @@ func main() { } } - g := gengateway.New(reg) + g := gengateway.New(reg, *useRequestContext) reg.SetPrefix(*importPrefix) + reg.SetAllowDeleteBody(*allowDeleteBody) if err := reg.Load(req); err != nil { emitError(err) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go index a8431d29e1c9..5bd1c3f93287 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template.go @@ -4,21 +4,132 @@ import ( "bytes" "encoding/json" "fmt" + "os" "reflect" "regexp" "strconv" "strings" + "sync" + "github.com/golang/protobuf/proto" pbdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor" + swagger_options "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" ) +func listEnumNames(enum *descriptor.Enum) (names []string) { + for _, value := range enum.GetValue() { + names = append(names, value.GetName()) + } + return names +} + +func getEnumDefault(enum *descriptor.Enum) string { + for _, value := range enum.GetValue() { + if value.GetNumber() == 0 { + return value.GetName() + } + } + return "" +} + +// messageToQueryParameters converts a message to a list of swagger query parameters. 
+func messageToQueryParameters(message *descriptor.Message, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) { + for _, field := range message.Fields { + p, err := queryParams(message, field, "", reg, pathParams) + if err != nil { + return nil, err + } + params = append(params, p...) + } + return params, nil +} + +// queryParams converts a field to a list of swagger query parameters recuresively. +func queryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) { + // make sure the parameter is not already listed as a path parameter + for _, pathParam := range pathParams { + if pathParam.Target == field { + return nil, nil + } + } + schema := schemaOfField(field, reg) + fieldType := field.GetTypeName() + if message.File != nil { + comments := fieldProtoComments(reg, message, field) + if err := updateSwaggerDataFromComments(&schema, comments); err != nil { + return nil, err + } + } + + isEnum := field.GetType() == pbdescriptor.FieldDescriptorProto_TYPE_ENUM + items := schema.Items + if schema.Type != "" || isEnum { + if schema.Type == "object" { + return nil, nil // TODO: currently, mapping object in query parameter is not supported + } + if items != nil && (items.Type == "" || items.Type == "object") && !isEnum { + return nil, nil // TODO: currently, mapping object in query parameter is not supported + } + desc := schema.Description + if schema.Title != "" { // merge title because title of parameter object will be ignored + desc = strings.TrimSpace(schema.Title + ". " + schema.Description) + } + param := swaggerParameterObject{ + Name: prefix + field.GetName(), + Description: desc, + In: "query", + Type: schema.Type, + Items: schema.Items, + Format: schema.Format, + } + if isEnum { + enum, err := reg.LookupEnum("", fieldType) + if err != nil { + return nil, fmt.Errorf("unknown enum type %s", fieldType) + } + if items != nil { // array + param.Items = &swaggerItemsObject{ + Type: "string", + Enum: listEnumNames(enum), + } + } else { + param.Type = "string" + param.Enum = listEnumNames(enum) + param.Default = getEnumDefault(enum) + } + valueComments := enumValueProtoComments(reg, enum) + if valueComments != "" { + param.Description = strings.TrimLeft(param.Description+"\n\n "+valueComments, "\n") + } + } + return []swaggerParameterObject{param}, nil + } + + // nested type, recurse + msg, err := reg.LookupMsg("", fieldType) + if err != nil { + return nil, fmt.Errorf("unknown message type %s", fieldType) + } + for _, nestedField := range msg.Fields { + p, err := queryParams(msg, nestedField, prefix+field.GetName()+".", reg, pathParams) + if err != nil { + return nil, err + } + params = append(params, p...) + } + return params, nil +} + // findServicesMessagesAndEnumerations discovers all messages and enums defined in the RPC methods of the service. 
-func findServicesMessagesAndEnumerations(s []*descriptor.Service, reg *descriptor.Registry, m messageMap, e enumMap) { +func findServicesMessagesAndEnumerations(s []*descriptor.Service, reg *descriptor.Registry, m messageMap, e enumMap, refs refMap) { for _, svc := range s { for _, meth := range svc.Methods { - m[fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg)] = meth.RequestType - findNestedMessagesAndEnumerations(meth.RequestType, reg, m, e) + // Request may be fully included in query + if _, ok := refs[fmt.Sprintf("#/definitions/%s", fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg))]; ok { + m[fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg)] = meth.RequestType + findNestedMessagesAndEnumerations(meth.RequestType, reg, m, e) + } m[fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg)] = meth.ResponseType findNestedMessagesAndEnumerations(meth.ResponseType, reg, m, e) } @@ -62,23 +173,39 @@ func renderMessagesAsDefinition(messages messageMap, d swaggerDefinitionsObject, schemaCore: schemaCore{ Type: "object", }, - Properties: make(map[string]swaggerSchemaObject), } msgComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index)) if err := updateSwaggerDataFromComments(&schema, msgComments); err != nil { panic(err) } + opts, err := extractSchemaOptionFromMessageDescriptor(msg.DescriptorProto) + if err != nil { + panic(err) + } + if opts != nil { + if opts.ExternalDocs != nil { + if schema.ExternalDocs == nil { + schema.ExternalDocs = &swaggerExternalDocumentationObject{} + } + if opts.ExternalDocs.Description != "" { + schema.ExternalDocs.Description = opts.ExternalDocs.Description + } + if opts.ExternalDocs.Url != "" { + schema.ExternalDocs.URL = opts.ExternalDocs.Url + } + } - for i, f := range msg.Fields { - fieldValue := schemaOfField(f, reg) + // TODO(ivucica): add remaining fields of schema object + } - fieldProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field") - fieldProtoComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index), fieldProtoPath, int32(i)) - if err := updateSwaggerDataFromComments(&fieldValue, fieldProtoComments); err != nil { + for _, f := range msg.Fields { + fieldValue := schemaOfField(f, reg) + comments := fieldProtoComments(reg, msg, f) + if err := updateSwaggerDataFromComments(&fieldValue, comments); err != nil { panic(err) } - schema.Properties[f.GetName()] = fieldValue + schema.Properties = append(schema.Properties, keyVal{f.GetName(), fieldValue}) } d[fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)] = schema } @@ -131,9 +258,9 @@ func schemaOfField(f *descriptor.Field, reg *descriptor.Registry) swaggerSchemaO case array: return swaggerSchemaObject{ schemaCore: schemaCore{ - Type: "array", + Type: "array", + Items: (*swaggerItemsObject)(&core), }, - Items: (*swaggerItemsObject)(&core), } case object: return swaggerSchemaObject{ @@ -176,7 +303,8 @@ func primitiveSchema(t pbdescriptor.FieldDescriptorProto_Type) (ftype, format st case pbdescriptor.FieldDescriptorProto_TYPE_BOOL: return "boolean", "boolean", true case pbdescriptor.FieldDescriptorProto_TYPE_STRING: - return "string", "string", true + // NOTE: in swagger specifition, format should be empty on string type + return "string", "", true case pbdescriptor.FieldDescriptorProto_TYPE_BYTES: return "string", "byte", true case pbdescriptor.FieldDescriptorProto_TYPE_UINT32: @@ -197,35 +325,22 @@ func primitiveSchema(t pbdescriptor.FieldDescriptorProto_Type) (ftype, format 
st // renderEnumerationsAsDefinition inserts enums into the definitions object. func renderEnumerationsAsDefinition(enums enumMap, d swaggerDefinitionsObject, reg *descriptor.Registry) { - valueProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.EnumDescriptorProto)(nil)), "Value") for _, enum := range enums { enumComments := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index)) - var enumNames []string // it may be necessary to sort the result of the GetValue function. - var defaultValue string - var valueDescriptions []string - for valueIdx, value := range enum.GetValue() { - enumNames = append(enumNames, value.GetName()) - if defaultValue == "" && value.GetNumber() == 0 { - defaultValue = value.GetName() - } - - valueDescription := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index), valueProtoPath, int32(valueIdx)) - if valueDescription != "" { - valueDescriptions = append(valueDescriptions, value.GetName()+": "+valueDescription) - } - } - - if len(valueDescriptions) > 0 { - enumComments += "\n\n - " + strings.Join(valueDescriptions, "\n - ") + enumNames := listEnumNames(enum) + defaultValue := getEnumDefault(enum) + valueComments := enumValueProtoComments(reg, enum) + if valueComments != "" { + enumComments = strings.TrimLeft(enumComments+"\n\n "+valueComments, "\n") } enumSchemaObject := swaggerSchemaObject{ schemaCore: schemaCore{ - Type: "string", + Type: "string", + Enum: enumNames, + Default: defaultValue, }, - Enum: enumNames, - Default: defaultValue, } if err := updateSwaggerDataFromComments(&enumSchemaObject, enumComments); err != nil { panic(err) @@ -237,9 +352,21 @@ func renderEnumerationsAsDefinition(enums enumMap, d swaggerDefinitionsObject, r // Take in a FQMN or FQEN and return a swagger safe version of the FQMN func fullyQualifiedNameToSwaggerName(fqn string, reg *descriptor.Registry) string { - return resolveFullyQualifiedNameToSwaggerName(fqn, append(reg.GetAllFQMNs(), reg.GetAllFQENs()...)) + registriesSeenMutex.Lock() + defer registriesSeenMutex.Unlock() + if mapping, present := registriesSeen[reg]; present { + return mapping[fqn] + } + mapping := resolveFullyQualifiedNameToSwaggerNames(append(reg.GetAllFQMNs(), reg.GetAllFQENs()...)) + registriesSeen[reg] = mapping + return mapping[fqn] } +// registriesSeen is used to memoise calls to resolveFullyQualifiedNameToSwaggerNames so +// we don't repeat it unnecessarily, since it can take some time. +var registriesSeen = map[*descriptor.Registry]map[string]string{} +var registriesSeenMutex sync.Mutex + // Take the names of every proto and "uniq-ify" them. The idea is to produce a // set of names that meet a couple of conditions. They must be stable, they // must be unique, and they must be shorter than the FQN. @@ -247,7 +374,7 @@ func fullyQualifiedNameToSwaggerName(fqn string, reg *descriptor.Registry) strin // This likely could be made better. This will always generate the same names // but may not always produce optimal names. This is a reasonably close // approximation of what they should look like in most cases. 
-func resolveFullyQualifiedNameToSwaggerName(fqn string, messages []string) string { +func resolveFullyQualifiedNameToSwaggerNames(messages []string) map[string]string { packagesByDepth := make(map[int][][]string) uniqueNames := make(map[string]string) @@ -287,7 +414,7 @@ func resolveFullyQualifiedNameToSwaggerName(fqn string, messages []string) strin } } } - return uniqueNames[fqn] + return uniqueNames } // Swagger expects paths of the form /path/{string_value} but grpc-gateway paths are expected to be of the form /path/{string_value=strprefix/*}. This should reformat it correctly. @@ -342,11 +469,11 @@ func templateToSwaggerPath(path string) string { return strings.Join(parts, "/") } -func renderServices(services []*descriptor.Service, paths swaggerPathsObject, reg *descriptor.Registry) error { +func renderServices(services []*descriptor.Service, paths swaggerPathsObject, reg *descriptor.Registry, refs refMap) error { // Correctness of svcIdx and methIdx depends on 'services' containing the services in the same order as the 'file.Service' array. for svcIdx, svc := range services { for methIdx, meth := range svc.Methods { - for _, b := range meth.Bindings { + for bIdx, b := range meth.Bindings { // Iterate over all the swagger parameters parameters := swaggerParametersObject{} for _, parameter := range b.PathParams { @@ -386,7 +513,7 @@ func renderServices(services []*descriptor.Service, paths swaggerPathsObject, re }, } } else { - lastField := b.Body.FieldPath[len(b.Body.FieldPath) - 1] + lastField := b.Body.FieldPath[len(b.Body.FieldPath)-1] schema = schemaOfField(lastField.Target, reg) } @@ -399,8 +526,15 @@ func renderServices(services []*descriptor.Service, paths swaggerPathsObject, re Description: desc, In: "body", Required: true, - Schema: &schema, + Schema: &schema, }) + } else if b.HTTPMethod == "GET" { + // add the parameters to the query string + queryParams, err := messageToQueryParameters(meth.RequestType, reg, b.PathParams) + if err != nil { + return err + } + parameters = append(parameters, queryParams...) } pathItemObject, ok := paths[templateToSwaggerPath(b.PathTmpl.Template)] @@ -414,9 +548,8 @@ func renderServices(services []*descriptor.Service, paths swaggerPathsObject, re desc += "(streaming responses)" } operationObject := &swaggerOperationObject{ - Tags: []string{svc.GetName()}, - OperationID: fmt.Sprintf("%s", meth.GetName()), - Parameters: parameters, + Tags: []string{svc.GetName()}, + Parameters: parameters, Responses: swaggerResponsesObject{ "200": swaggerResponseObject{ Description: desc, @@ -428,11 +561,47 @@ func renderServices(services []*descriptor.Service, paths swaggerPathsObject, re }, }, } + if bIdx == 0 { + operationObject.OperationID = fmt.Sprintf("%s", meth.GetName()) + } else { + // OperationID must be unique in an OpenAPI v2 definition. 
+ operationObject.OperationID = fmt.Sprintf("%s%d", meth.GetName(), bIdx+1) + } + + // Fill reference map with referenced request messages + for _, param := range operationObject.Parameters { + if param.Schema != nil && param.Schema.Ref != "" { + refs[param.Schema.Ref] = struct{}{} + } + } + methComments := protoComments(reg, svc.File, nil, "Service", int32(svcIdx), methProtoPath, int32(methIdx)) if err := updateSwaggerDataFromComments(operationObject, methComments); err != nil { panic(err) } + opts, err := extractOperationOptionFromMethodDescriptor(meth.MethodDescriptorProto) + if opts != nil { + if err != nil { + panic(err) + } + if opts.ExternalDocs != nil { + if operationObject.ExternalDocs == nil { + operationObject.ExternalDocs = &swaggerExternalDocumentationObject{} + } + if opts.ExternalDocs.Description != "" { + operationObject.ExternalDocs.Description = opts.ExternalDocs.Description + } + if opts.ExternalDocs.Url != "" { + operationObject.ExternalDocs.URL = opts.ExternalDocs.Url + } + } + // TODO(ivucica): this would be better supported by looking whether the method is deprecated in the proto file + operationObject.Deprecated = opts.Deprecated + + // TODO(ivucica): add remaining fields of operation object + } + switch b.HTTPMethod { case "DELETE": pathItemObject.Delete = operationObject @@ -446,6 +615,9 @@ func renderServices(services []*descriptor.Service, paths swaggerPathsObject, re case "PUT": pathItemObject.Put = operationObject break + case "PATCH": + pathItemObject.Patch = operationObject + break } paths[templateToSwaggerPath(b.PathTmpl.Template)] = pathItemObject } @@ -476,13 +648,16 @@ func applyTemplate(p param) (string, error) { // Loops through all the services and their exposed GET/POST/PUT/DELETE definitions // and create entries for all of them. - renderServices(p.Services, s.Paths, p.reg) + refs := refMap{} + if err := renderServices(p.Services, s.Paths, p.reg, refs); err != nil { + panic(err) + } // Find all the service's messages and enumerations that are defined (recursively) and then // write their request and response types out as definition objects. m := messageMap{} e := enumMap{} - findServicesMessagesAndEnumerations(p.Services, p.reg, m, e) + findServicesMessagesAndEnumerations(p.Services, p.reg, m, e, refs) renderMessagesAsDefinition(m, s.Definitions, p.reg) renderEnumerationsAsDefinition(e, s.Definitions, p.reg) @@ -493,6 +668,73 @@ func applyTemplate(p param) (string, error) { panic(err) } + // There may be additional options in the swagger option in the proto. 
+ spb, err := extractSwaggerOptionFromFileDescriptor(p.FileDescriptorProto) + if err != nil { + panic(err) + } + if spb != nil { + if spb.Swagger != "" { + s.Swagger = spb.Swagger + } + if spb.Info != nil { + if spb.Info.Title != "" { + s.Info.Title = spb.Info.Title + } + if spb.Info.Version != "" { + s.Info.Version = spb.Info.Version + } + if spb.Info.Contact != nil { + if s.Info.Contact == nil { + s.Info.Contact = &swaggerContactObject{} + } + if spb.Info.Contact.Name != "" { + s.Info.Contact.Name = spb.Info.Contact.Name + } + if spb.Info.Contact.Url != "" { + s.Info.Contact.URL = spb.Info.Contact.Url + } + if spb.Info.Contact.Email != "" { + s.Info.Contact.Email = spb.Info.Contact.Email + } + } + } + if spb.Host != "" { + s.Host = spb.Host + } + if spb.BasePath != "" { + s.BasePath = spb.BasePath + } + if len(spb.Schemes) > 0 { + s.Schemes = make([]string, len(spb.Schemes)) + for i, scheme := range spb.Schemes { + s.Schemes[i] = strings.ToLower(scheme.String()) + } + } + if len(spb.Consumes) > 0 { + s.Consumes = make([]string, len(spb.Consumes)) + copy(s.Consumes, spb.Consumes) + } + if len(spb.Produces) > 0 { + s.Produces = make([]string, len(spb.Produces)) + copy(s.Produces, spb.Produces) + } + if spb.ExternalDocs != nil { + if s.ExternalDocs == nil { + s.ExternalDocs = &swaggerExternalDocumentationObject{} + } + if spb.ExternalDocs.Description != "" { + s.ExternalDocs.Description = spb.ExternalDocs.Description + } + if spb.ExternalDocs.Url != "" { + s.ExternalDocs.URL = spb.ExternalDocs.Url + } + } + + // Additional fields on the OpenAPI v2 spec's "Swagger" object + // should be added here, once supported in the proto. + } + // We now have rendered the entire swagger object. Write the bytes out to a // string so it can be written to disk. var w bytes.Buffer @@ -505,9 +747,9 @@ func applyTemplate(p param) (string, error) { // updateSwaggerDataFromComments updates a Swagger object based on a comment // from the proto file. // -// First paragraph of a comment is used for summary. Remaining paragraphs of a -// comment are used for description. If 'Summary' field is not present on the -// passed swaggerObject, the summary and description are joined by \n\n. +// First paragraph of a comment is used for summary. Remaining paragraphs of +// a comment are used for description. If 'Summary' field is not present on +// the passed swaggerObject, the summary and description are joined by \n\n. // // If there is a field named 'Info', its 'Summary' and 'Description' fields // will be updated instead. @@ -537,17 +779,15 @@ func updateSwaggerDataFromComments(swaggerObject interface{}, comment string) er usingTitle = true } + paragraphs := strings.Split(comment, "\n\n") + // If there is a summary (or summary-equivalent), use the first // paragraph as summary, and the rest as description. if summaryValue.CanSet() { - paragraphs := strings.Split(comment, "\n\n") - summary := strings.TrimSpace(paragraphs[0]) description := strings.TrimSpace(strings.Join(paragraphs[1:], "\n\n")) - if !usingTitle || summary == "" || summary[len(summary)-1] != '.' 
{ - if len(summary) > 0 { - summaryValue.Set(reflect.ValueOf(summary)) - } + if !usingTitle || (len(summary) > 0 && summary[len(summary)-1] != '.') { + summaryValue.Set(reflect.ValueOf(summary)) if len(description) > 0 { if !descriptionValue.CanSet() { return fmt.Errorf("Encountered object type with a summary, but no description") @@ -561,22 +801,43 @@ func updateSwaggerDataFromComments(swaggerObject interface{}, comment string) er // There was no summary field on the swaggerObject. Try to apply the // whole comment into description. if descriptionValue.CanSet() { - descriptionValue.Set(reflect.ValueOf(comment)) + descriptionValue.Set(reflect.ValueOf(strings.Join(paragraphs, "\n\n"))) return nil } - return fmt.Errorf("No description nor summary property.") + return fmt.Errorf("no description nor summary property") +} + +func fieldProtoComments(reg *descriptor.Registry, msg *descriptor.Message, field *descriptor.Field) string { + protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field") + for i, f := range msg.Fields { + if f == field { + return protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index), protoPath, int32(i)) + } + } + return "" +} + +func enumValueProtoComments(reg *descriptor.Registry, enum *descriptor.Enum) string { + protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.EnumDescriptorProto)(nil)), "Value") + var comments []string + for idx, value := range enum.GetValue() { + name := value.GetName() + str := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index), protoPath, int32(idx)) + if str != "" { + comments = append(comments, name+": "+str) + } + } + if len(comments) > 0 { + return "- " + strings.Join(comments, "\n - ") + } + return "" } func protoComments(reg *descriptor.Registry, file *descriptor.File, outers []string, typeName string, typeIndex int32, fieldPaths ...int32) string { if file.SourceCodeInfo == nil { - // Curious! A file without any source code info. - // This could be a test that's providing incomplete - // descriptor.File information. - // - // We could simply return no comments, but panic - // could make debugging easier. - panic("descriptor.File should not contain nil SourceCodeInfo") + fmt.Fprintln(os.Stderr, "descriptor.File should not contain nil SourceCodeInfo") + return "" } outerPaths := make([]int32, len(outers)) @@ -693,16 +954,76 @@ func isProtoPathMatches(paths []int32, outerPaths []int32, typeName string, type func protoPathIndex(descriptorType reflect.Type, what string) int32 { field, ok := descriptorType.Elem().FieldByName(what) if !ok { - panic(fmt.Errorf("Could not find protobuf descriptor type id for %s.", what)) + panic(fmt.Errorf("could not find protobuf descriptor type id for %s", what)) } pbtag := field.Tag.Get("protobuf") if pbtag == "" { - panic(fmt.Errorf("No Go tag 'protobuf' on protobuf descriptor for %s.", what)) + panic(fmt.Errorf("no Go tag 'protobuf' on protobuf descriptor for %s", what)) } path, err := strconv.Atoi(strings.Split(pbtag, ",")[1]) if err != nil { - panic(fmt.Errorf("Protobuf descriptor id for %s cannot be converted to a number: %s", what, err.Error())) + panic(fmt.Errorf("protobuf descriptor id for %s cannot be converted to a number: %s", what, err.Error())) } return int32(path) } + +// extractOperationOptionFromMethodDescriptor extracts the message of type +// swagger_options.Operation from a given proto method's descriptor. 
+func extractOperationOptionFromMethodDescriptor(meth *pbdescriptor.MethodDescriptorProto) (*swagger_options.Operation, error) { + if meth.Options == nil { + return nil, nil + } + if !proto.HasExtension(meth.Options, swagger_options.E_Openapiv2Operation) { + return nil, nil + } + ext, err := proto.GetExtension(meth.Options, swagger_options.E_Openapiv2Operation) + if err != nil { + return nil, err + } + opts, ok := ext.(*swagger_options.Operation) + if !ok { + return nil, fmt.Errorf("extension is %T; want an Operation", ext) + } + return opts, nil +} + +// extractSchemaOptionFromMessageDescriptor extracts the message of type +// swagger_options.Schema from a given proto message's descriptor. +func extractSchemaOptionFromMessageDescriptor(msg *pbdescriptor.DescriptorProto) (*swagger_options.Schema, error) { + if msg.Options == nil { + return nil, nil + } + if !proto.HasExtension(msg.Options, swagger_options.E_Openapiv2Schema) { + return nil, nil + } + ext, err := proto.GetExtension(msg.Options, swagger_options.E_Openapiv2Schema) + if err != nil { + return nil, err + } + opts, ok := ext.(*swagger_options.Schema) + if !ok { + return nil, fmt.Errorf("extension is %T; want a Schema", ext) + } + return opts, nil +} + +// extractSwaggerOptionFromFileDescriptor extracts the message of type +// swagger_options.Swagger from a given proto method's descriptor. +func extractSwaggerOptionFromFileDescriptor(file *pbdescriptor.FileDescriptorProto) (*swagger_options.Swagger, error) { + if file.Options == nil { + return nil, nil + } + if !proto.HasExtension(file.Options, swagger_options.E_Openapiv2Swagger) { + return nil, nil + } + ext, err := proto.GetExtension(file.Options, swagger_options.E_Openapiv2Swagger) + if err != nil { + return nil, err + } + opts, ok := ext.(*swagger_options.Swagger) + if !ok { + return nil, fmt.Errorf("extension is %T; want a Swagger object", ext) + } + return opts, nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template_test.go index 1c897be8acd4..4633a0bbac60 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/template_test.go @@ -7,6 +7,7 @@ import ( "github.com/golang/protobuf/proto" protodescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor" "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule" ) @@ -30,6 +31,169 @@ func crossLinkFixture(f *descriptor.File) *descriptor.File { return f } +func TestMessageToQueryParameters(t *testing.T) { + type test struct { + MsgDescs []*protodescriptor.DescriptorProto + Message string + Params []swaggerParameterObject + } + + tests := []test{ + { + MsgDescs: []*protodescriptor.DescriptorProto{ + &protodescriptor.DescriptorProto{ + Name: proto.String("ExampleMessage"), + Field: []*protodescriptor.FieldDescriptorProto{ + { + Name: proto.String("a"), + Type: protodescriptor.FieldDescriptorProto_TYPE_STRING.Enum(), + Number: proto.Int32(1), + }, + { + Name: proto.String("b"), + Type: protodescriptor.FieldDescriptorProto_TYPE_DOUBLE.Enum(), + Number: proto.Int32(2), + }, + }, + }, + }, + Message: "ExampleMessage", + Params: []swaggerParameterObject{ + swaggerParameterObject{ + Name: "a", + In: "query", + 
Required: false, + Type: "string", + }, + swaggerParameterObject{ + Name: "b", + In: "query", + Required: false, + Type: "number", + Format: "double", + }, + }, + }, + { + MsgDescs: []*protodescriptor.DescriptorProto{ + &protodescriptor.DescriptorProto{ + Name: proto.String("ExampleMessage"), + Field: []*protodescriptor.FieldDescriptorProto{ + { + Name: proto.String("nested"), + Type: protodescriptor.FieldDescriptorProto_TYPE_MESSAGE.Enum(), + TypeName: proto.String(".example.Nested"), + Number: proto.Int32(1), + }, + }, + }, + &protodescriptor.DescriptorProto{ + Name: proto.String("Nested"), + Field: []*protodescriptor.FieldDescriptorProto{ + { + Name: proto.String("a"), + Type: protodescriptor.FieldDescriptorProto_TYPE_STRING.Enum(), + Number: proto.Int32(1), + }, + { + Name: proto.String("deep"), + Type: protodescriptor.FieldDescriptorProto_TYPE_MESSAGE.Enum(), + TypeName: proto.String(".example.Nested.DeepNested"), + Number: proto.Int32(2), + }, + }, + NestedType: []*protodescriptor.DescriptorProto{{ + Name: proto.String("DeepNested"), + Field: []*protodescriptor.FieldDescriptorProto{ + { + Name: proto.String("b"), + Type: protodescriptor.FieldDescriptorProto_TYPE_STRING.Enum(), + Number: proto.Int32(1), + }, + { + Name: proto.String("c"), + Type: protodescriptor.FieldDescriptorProto_TYPE_ENUM.Enum(), + TypeName: proto.String(".example.Nested.DeepNested.DeepEnum"), + Number: proto.Int32(2), + }, + }, + EnumType: []*protodescriptor.EnumDescriptorProto{ + { + Name: proto.String("DeepEnum"), + Value: []*protodescriptor.EnumValueDescriptorProto{ + {Name: proto.String("FALSE"), Number: proto.Int32(0)}, + {Name: proto.String("TRUE"), Number: proto.Int32(1)}, + }, + }, + }, + }}, + }, + }, + Message: "ExampleMessage", + Params: []swaggerParameterObject{ + swaggerParameterObject{ + Name: "nested.a", + In: "query", + Required: false, + Type: "string", + }, + swaggerParameterObject{ + Name: "nested.deep.b", + In: "query", + Required: false, + Type: "string", + }, + swaggerParameterObject{ + Name: "nested.deep.c", + In: "query", + Required: false, + Type: "string", + Enum: []string{"FALSE", "TRUE"}, + Default: "FALSE", + }, + }, + }, + } + + for _, test := range tests { + reg := descriptor.NewRegistry() + msgs := []*descriptor.Message{} + for _, msgdesc := range test.MsgDescs { + msgs = append(msgs, &descriptor.Message{DescriptorProto: msgdesc}) + } + file := descriptor.File{ + FileDescriptorProto: &protodescriptor.FileDescriptorProto{ + SourceCodeInfo: &protodescriptor.SourceCodeInfo{}, + Name: proto.String("example.proto"), + Package: proto.String("example"), + Dependency: []string{}, + MessageType: test.MsgDescs, + Service: []*protodescriptor.ServiceDescriptorProto{}, + }, + GoPkg: descriptor.GoPackage{ + Path: "example.com/path/to/example/example.pb", + Name: "example_pb", + }, + Messages: msgs, + } + reg.Load(&plugin.CodeGeneratorRequest{ + ProtoFile: []*protodescriptor.FileDescriptorProto{file.FileDescriptorProto}, + }) + + message, err := reg.LookupMsg("", ".example."+test.Message) + if err != nil { + t.Fatalf("failed to lookup message: %s", err) + } + params, err := messageToQueryParameters(message, reg, []descriptor.Parameter{}) + if err != nil { + t.Fatalf("failed to convert message to query parameters: %s", err) + } + if !reflect.DeepEqual(params, test.Params) { + t.Errorf("expected %v, got %v", test.Params, params) + } + } +} + func TestApplyTemplateSimple(t *testing.T) { msgdesc := &protodescriptor.DescriptorProto{ Name: proto.String("ExampleMessage"), @@ -413,6 +577,133 @@ func 
TestApplyTemplateRequestWithClientStreaming(t *testing.T) { } } +func TestApplyTemplateRequestWithUnusedReferences(t *testing.T) { + reqdesc := &protodescriptor.DescriptorProto{ + Name: proto.String("ExampleMessage"), + Field: []*protodescriptor.FieldDescriptorProto{ + { + Name: proto.String("string"), + Label: protodescriptor.FieldDescriptorProto_LABEL_OPTIONAL.Enum(), + Type: protodescriptor.FieldDescriptorProto_TYPE_STRING.Enum(), + Number: proto.Int32(1), + }, + }, + } + respdesc := &protodescriptor.DescriptorProto{ + Name: proto.String("EmptyMessage"), + } + meth := &protodescriptor.MethodDescriptorProto{ + Name: proto.String("Example"), + InputType: proto.String("ExampleMessage"), + OutputType: proto.String("EmptyMessage"), + ClientStreaming: proto.Bool(false), + ServerStreaming: proto.Bool(false), + } + svc := &protodescriptor.ServiceDescriptorProto{ + Name: proto.String("ExampleService"), + Method: []*protodescriptor.MethodDescriptorProto{meth}, + } + + req := &descriptor.Message{ + DescriptorProto: reqdesc, + } + resp := &descriptor.Message{ + DescriptorProto: respdesc, + } + stringField := &descriptor.Field{ + Message: req, + FieldDescriptorProto: req.GetField()[0], + } + file := descriptor.File{ + FileDescriptorProto: &protodescriptor.FileDescriptorProto{ + SourceCodeInfo: &protodescriptor.SourceCodeInfo{}, + Name: proto.String("example.proto"), + Package: proto.String("example"), + MessageType: []*protodescriptor.DescriptorProto{reqdesc, respdesc}, + Service: []*protodescriptor.ServiceDescriptorProto{svc}, + }, + GoPkg: descriptor.GoPackage{ + Path: "example.com/path/to/example/example.pb", + Name: "example_pb", + }, + Messages: []*descriptor.Message{req, resp}, + Services: []*descriptor.Service{ + { + ServiceDescriptorProto: svc, + Methods: []*descriptor.Method{ + { + MethodDescriptorProto: meth, + RequestType: req, + ResponseType: resp, + Bindings: []*descriptor.Binding{ + { + HTTPMethod: "GET", + PathTmpl: httprule.Template{ + Version: 1, + OpCodes: []int{0, 0}, + Template: "/v1/example", + }, + }, + { + HTTPMethod: "POST", + PathTmpl: httprule.Template{ + Version: 1, + OpCodes: []int{0, 0}, + Template: "/v1/example/{string}", + }, + PathParams: []descriptor.Parameter{ + { + FieldPath: descriptor.FieldPath([]descriptor.FieldPathComponent{ + { + Name: "string", + Target: stringField, + }, + }), + Target: stringField, + }, + }, + Body: &descriptor.Body{ + FieldPath: descriptor.FieldPath([]descriptor.FieldPathComponent{ + { + Name: "string", + Target: stringField, + }, + }), + }, + }, + }, + }, + }, + }, + }, + } + + reg := descriptor.NewRegistry() + reg.Load(&plugin.CodeGeneratorRequest{ProtoFile: []*protodescriptor.FileDescriptorProto{file.FileDescriptorProto}}) + result, err := applyTemplate(param{File: crossLinkFixture(&file), reg: reg}) + if err != nil { + t.Errorf("applyTemplate(%#v) failed with %v; want success", file, err) + return + } + var obj swaggerObject + err = json.Unmarshal([]byte(result), &obj) + if err != nil { + t.Errorf("applyTemplate(%#v) failed with %v; want success", file, err) + return + } + + // Only EmptyMessage must be present, not ExampleMessage + if want, got, name := 1, len(obj.Definitions), "len(Definitions)"; !reflect.DeepEqual(got, want) { + t.Errorf("applyTemplate(%#v).%s = %d want to be %d", file, name, got, want) + } + + // If there was a failure, print out the input and the json result for debugging. 
+ if t.Failed() { + t.Errorf("had: %s", file) + t.Errorf("got: %s", result) + } +} + func TestTemplateToSwaggerPath(t *testing.T) { var tests = []struct { input string @@ -467,7 +758,8 @@ func TestResolveFullyQualifiedNameToSwaggerName(t *testing.T) { } for _, data := range tests { - output := resolveFullyQualifiedNameToSwaggerName(data.input, data.listOfFQMNs) + names := resolveFullyQualifiedNameToSwaggerNames(data.listOfFQMNs) + output := names[data.input] if output != data.output { t.Errorf("Expected fullyQualifiedNameToSwaggerName(%v) to be %s but got %s", data.input, data.output, output) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go index b91e457c0068..7263e66b7e0b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger/types.go @@ -1,6 +1,9 @@ package genswagger import ( + "bytes" + "encoding/json" + "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor" ) @@ -20,9 +23,8 @@ type swaggerInfoObject struct { TermsOfService string `json:"termsOfService,omitempty"` Version string `json:"version"` - Contact *swaggerContactObject `json:"contact,omitempty"` - License *swaggerLicenseObject `json:"license,omitempty"` - ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` + Contact *swaggerContactObject `json:"contact,omitempty"` + License *swaggerLicenseObject `json:"license,omitempty"` } // http://swagger.io/specification/#contactObject @@ -46,15 +48,16 @@ type swaggerExternalDocumentationObject struct { // http://swagger.io/specification/#swaggerObject type swaggerObject struct { - Swagger string `json:"swagger"` - Info swaggerInfoObject `json:"info"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Schemes []string `json:"schemes"` - Consumes []string `json:"consumes"` - Produces []string `json:"produces"` - Paths swaggerPathsObject `json:"paths"` - Definitions swaggerDefinitionsObject `json:"definitions"` + Swagger string `json:"swagger"` + Info swaggerInfoObject `json:"info"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Schemes []string `json:"schemes"` + Consumes []string `json:"consumes"` + Produces []string `json:"produces"` + Paths swaggerPathsObject `json:"paths"` + Definitions swaggerDefinitionsObject `json:"definitions"` + ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` } // http://swagger.io/specification/#pathsObject @@ -66,6 +69,7 @@ type swaggerPathItemObject struct { Delete *swaggerOperationObject `json:"delete,omitempty"` Post *swaggerOperationObject `json:"post,omitempty"` Put *swaggerOperationObject `json:"put,omitempty"` + Patch *swaggerOperationObject `json:"patch,omitempty"` } // http://swagger.io/specification/#operationObject @@ -76,6 +80,7 @@ type swaggerOperationObject struct { Responses swaggerResponsesObject `json:"responses"` Parameters swaggerParametersObject `json:"parameters,omitempty"` Tags []string `json:"tags,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` } @@ -91,6 +96,8 @@ type swaggerParameterObject struct { Type string `json:"type,omitempty"` Format string `json:"format,omitempty"` Items *swaggerItemsObject `json:"items,omitempty"` + Enum []string 
`json:"enum,omitempty"` + Default string `json:"default,omitempty"` // Or you can explicitly refer to another type. If this is defined all // other fields should be empty @@ -103,6 +110,14 @@ type schemaCore struct { Type string `json:"type,omitempty"` Format string `json:"format,omitempty"` Ref string `json:"$ref,omitempty"` + + Items *swaggerItemsObject `json:"items,omitempty"` + + // If the item is an enumeration include a list of all the *NAMES* of the + // enum values. I'm not sure how well this will work but assuming all enums + // start from 0 index it will be great. I don't think that is a good assumption. + Enum []string `json:"enum,omitempty"` + Default string `json:"default,omitempty"` } type swaggerItemsObject schemaCore @@ -116,22 +131,48 @@ type swaggerResponseObject struct { Schema swaggerSchemaObject `json:"schema"` } +type keyVal struct { + Key string + Value interface{} +} + +type swaggerSchemaObjectProperties []keyVal + +func (op swaggerSchemaObjectProperties) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.WriteString("{") + for i, kv := range op { + if i != 0 { + buf.WriteString(",") + } + key, err := json.Marshal(kv.Key) + if err != nil { + return nil, err + } + buf.Write(key) + buf.WriteString(":") + val, err := json.Marshal(kv.Value) + if err != nil { + return nil, err + } + buf.Write(val) + } + + buf.WriteString("}") + return buf.Bytes(), nil +} + // http://swagger.io/specification/#schemaObject type swaggerSchemaObject struct { schemaCore // Properties can be recursively defined - Properties map[string]swaggerSchemaObject `json:"properties,omitempty"` - AdditionalProperties *swaggerSchemaObject `json:"additionalProperties,omitempty"` - Items *swaggerItemsObject `json:"items,omitempty"` - - // If the item is an enumeration include a list of all the *NAMES* of the - // enum values. I'm not sure how well this will work but assuming all enums - // start from 0 index it will be great. I don't think that is a good assumption. - Enum []string `json:"enum,omitempty"` - Default string `json:"default,omitempty"` + Properties swaggerSchemaObjectProperties `json:"properties,omitempty"` + AdditionalProperties *swaggerSchemaObject `json:"additionalProperties,omitempty"` Description string `json:"description,omitempty"` Title string `json:"title,omitempty"` + + ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` } // http://swagger.io/specification/#referenceObject @@ -149,3 +190,6 @@ type messageMap map[string]*descriptor.Message // Internal type mapping from FQEN to descriptor.Enum. Used as a set by the // findServiceMessages function. type enumMap map[string]*descriptor.Enum + +// Internal type to store used references. 
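+// The references recorded here are what allow the generator to omit schema
+// definitions that are never referenced; TestApplyTemplateRequestWithUnusedReferences
+// in template_test.go above exercises this by expecting the unused ExampleMessage
+// to be absent from the emitted Definitions.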
+type refMap map[string]struct{} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go index fc3a0030a11e..db747704e231 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main.go @@ -2,6 +2,7 @@ package main import ( "flag" + "fmt" "io" "io/ioutil" "os" @@ -15,8 +16,9 @@ import ( ) var ( - importPrefix = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files") - file = flag.String("file", "stdin", "where to load data from") + importPrefix = flag.String("import_prefix", "", "prefix to be added to go package paths for imported proto files") + file = flag.String("file", "stdin", "where to load data from") + allowDeleteBody = flag.Bool("allow_delete_body", false, "unless set, HTTP DELETE methods may not have a body") ) func parseReq(r io.Reader) (*plugin.CodeGeneratorRequest, error) { @@ -50,29 +52,21 @@ func main() { if err != nil { glog.Fatal(err) } + pkgMap := make(map[string]string) if req.Parameter != nil { - for _, p := range strings.Split(req.GetParameter(), ",") { - spec := strings.SplitN(p, "=", 2) - if len(spec) == 1 { - if err := flag.CommandLine.Set(spec[0], ""); err != nil { - glog.Fatalf("Cannot set flag %s", p) - } - continue - } - name, value := spec[0], spec[1] - if strings.HasPrefix(name, "M") { - reg.AddPkgMap(name[1:], value) - continue - } - if err := flag.CommandLine.Set(name, value); err != nil { - glog.Fatalf("Cannot set flag %s", p) - } + err := parseReqParam(req.GetParameter(), flag.CommandLine, pkgMap) + if err != nil { + glog.Fatalf("Error parsing flags: %v", err) } } + reg.SetPrefix(*importPrefix) + reg.SetAllowDeleteBody(*allowDeleteBody) + for k, v := range pkgMap { + reg.AddPkgMap(k, v) + } g := genswagger.New(reg) - reg.SetPrefix(*importPrefix) if err := reg.Load(req); err != nil { emitError(err) return @@ -113,3 +107,38 @@ func emitResp(resp *plugin.CodeGeneratorResponse) { glog.Fatal(err) } } + +// parseReqParam parses a CodeGeneratorRequest parameter and adds the +// extracted values to the given FlagSet and pkgMap. Returns a non-nil +// error if setting a flag failed. 
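+//
+// A sketch of a typical call, using one of the parameter strings exercised by
+// TestParseReqParam below (the values are purely illustrative):
+//
+//	pkgMap := make(map[string]string)
+//	err := parseReqParam("allow_delete_body=false,Ma/b/c.proto=github.com/x/y/z", flag.CommandLine, pkgMap)
+//
+// After the call, the allow_delete_body flag is false and pkgMap maps
+// "a/b/c.proto" to "github.com/x/y/z". A bare "allow_delete_body" entry is
+// treated as true, while other bare names are set to the empty string.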
+func parseReqParam(param string, f *flag.FlagSet, pkgMap map[string]string) error { + if param == "" { + return nil + } + for _, p := range strings.Split(param, ",") { + spec := strings.SplitN(p, "=", 2) + if len(spec) == 1 { + if spec[0] == "allow_delete_body" { + err := f.Set(spec[0], "true") + if err != nil { + return fmt.Errorf("Cannot set flag %s: %v", p, err) + } + continue + } + err := f.Set(spec[0], "") + if err != nil { + return fmt.Errorf("Cannot set flag %s: %v", p, err) + } + continue + } + name, value := spec[0], spec[1] + if strings.HasPrefix(name, "M") { + pkgMap[name[1:]] = value + continue + } + if err := f.Set(name, value); err != nil { + return fmt.Errorf("Cannot set flag %s: %v", p, err) + } + } + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main_test.go new file mode 100644 index 000000000000..c4d12dd20bd7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/main_test.go @@ -0,0 +1,129 @@ +package main + +import ( + "flag" + "reflect" + "testing" +) + +func TestParseReqParam(t *testing.T) { + + f := flag.CommandLine + + // this one must be first - with no leading clearFlags call it + // verifies our expectation of default values as we reset by + // clearFlags + pkgMap := make(map[string]string) + expected := map[string]string{} + err := parseReqParam("", f, pkgMap) + if err != nil { + t.Errorf("Test 0: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 0: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(false, "stdin", "", t, 0) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{"google/api/annotations.proto": "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api"} + err = parseReqParam("allow_delete_body,file=./foo.pb,import_prefix=/bar/baz,Mgoogle/api/annotations.proto=github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api", f, pkgMap) + if err != nil { + t.Errorf("Test 1: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 1: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(true, "./foo.pb", "/bar/baz", t, 1) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{"google/api/annotations.proto": "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api"} + err = parseReqParam("allow_delete_body=true,file=./foo.pb,import_prefix=/bar/baz,Mgoogle/api/annotations.proto=github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api", f, pkgMap) + if err != nil { + t.Errorf("Test 2: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 2: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(true, "./foo.pb", "/bar/baz", t, 2) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{"a/b/c.proto": "github.com/x/y/z", "f/g/h.proto": "github.com/1/2/3/"} + err = parseReqParam("allow_delete_body=false,Ma/b/c.proto=github.com/x/y/z,Mf/g/h.proto=github.com/1/2/3/", f, pkgMap) + if err != nil { + t.Errorf("Test 3: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 3: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(false, "stdin", "", t, 3) + + clearFlags() + pkgMap 
= make(map[string]string) + expected = map[string]string{} + err = parseReqParam("", f, pkgMap) + if err != nil { + t.Errorf("Test 4: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 4: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(false, "stdin", "", t, 4) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{} + err = parseReqParam("unknown_param=17", f, pkgMap) + if err == nil { + t.Error("Test 5: expected parse error not returned") + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 5: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(false, "stdin", "", t, 5) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{} + err = parseReqParam("Mfoo", f, pkgMap) + if err == nil { + t.Error("Test 6: expected parse error not returned") + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 6: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(false, "stdin", "", t, 6) + + clearFlags() + pkgMap = make(map[string]string) + expected = map[string]string{} + err = parseReqParam("allow_delete_body,file,import_prefix", f, pkgMap) + if err != nil { + t.Errorf("Test 7: unexpected parse error '%v'", err) + } + if !reflect.DeepEqual(pkgMap, expected) { + t.Errorf("Test 7: pkgMap parse error, expected '%v', got '%v'", expected, pkgMap) + } + checkFlags(true, "", "", t, 7) + +} + +func checkFlags(allowDeleteV bool, fileV, importPathV string, t *testing.T, tid int) { + if *importPrefix != importPathV { + t.Errorf("Test %v: import_prefix misparsed, expected '%v', got '%v'", tid, importPathV, *importPrefix) + } + if *file != fileV { + t.Errorf("Test %v: file misparsed, expected '%v', got '%v'", tid, fileV, *file) + } + if *allowDeleteBody != allowDeleteV { + t.Errorf("Test %v: allow_delete_body misparsed, expected '%v', got '%v'", tid, allowDeleteV, *allowDeleteBody) + } +} + +func clearFlags() { + *importPrefix = "" + *file = "stdin" + *allowDeleteBody = false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go new file mode 100644 index 000000000000..7bc93b91c631 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.pb.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: protoc-gen-swagger/options/annotations.proto + +package options + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf1 "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. 
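+//
+// The extension descriptors declared below correspond to the extensions in
+// protoc-gen-swagger/options/annotations.proto. A sketch of how a user proto
+// might attach one of them to a method (service and message names here are
+// hypothetical):
+//
+//	import "protoc-gen-swagger/options/annotations.proto";
+//
+//	service EchoService {
+//	  rpc Echo(EchoRequest) returns (EchoResponse) {
+//	    option (grpc.gateway.protoc_gen_swagger.options.openapiv2_operation) = {
+//	      summary: "Echoes the request back to the caller"
+//	      deprecated: true
+//	    };
+//	  }
+//	}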
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +var E_Openapiv2Swagger = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf1.FileOptions)(nil), + ExtensionType: (*Swagger)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_swagger.options.openapiv2_swagger", + Tag: "bytes,1042,opt,name=openapiv2_swagger,json=openapiv2Swagger", + Filename: "protoc-gen-swagger/options/annotations.proto", +} + +var E_Openapiv2Operation = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf1.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_swagger.options.openapiv2_operation", + Tag: "bytes,1042,opt,name=openapiv2_operation,json=openapiv2Operation", + Filename: "protoc-gen-swagger/options/annotations.proto", +} + +var E_Openapiv2Schema = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf1.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_swagger.options.openapiv2_schema", + Tag: "bytes,1042,opt,name=openapiv2_schema,json=openapiv2Schema", + Filename: "protoc-gen-swagger/options/annotations.proto", +} + +var E_Openapiv2Tag = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf1.ServiceOptions)(nil), + ExtensionType: (*Tag)(nil), + Field: 1042, + Name: "grpc.gateway.protoc_gen_swagger.options.openapiv2_tag", + Tag: "bytes,1042,opt,name=openapiv2_tag,json=openapiv2Tag", + Filename: "protoc-gen-swagger/options/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Openapiv2Swagger) + proto.RegisterExtension(E_Openapiv2Operation) + proto.RegisterExtension(E_Openapiv2Schema) + proto.RegisterExtension(E_Openapiv2Tag) +} + +func init() { proto.RegisterFile("protoc-gen-swagger/options/annotations.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0x03, 0x31, + 0x10, 0x85, 0xe9, 0x45, 0x64, 0x55, 0xac, 0xeb, 0x45, 0x8a, 0x68, 0x6f, 0x8a, 0xb4, 0x89, 0xd4, + 0xdb, 0xde, 0x54, 0xf0, 0x26, 0x85, 0x6d, 0x4f, 0x5e, 0x4a, 0x9a, 0x8e, 0xd3, 0x40, 0x9b, 0x09, + 0x49, 0xda, 0x52, 0xe8, 0xd1, 0x5f, 0xe0, 0x2f, 0x16, 0x93, 0xed, 0x56, 0xd6, 0x2a, 0x7b, 0xdb, + 0x99, 0x9d, 0xf7, 0xbe, 0xc7, 0x23, 0x49, 0xc7, 0x58, 0xf2, 0x24, 0xbb, 0x08, 0xba, 0xeb, 0x56, + 0x02, 0x11, 0x2c, 0x27, 0xe3, 0x15, 0x69, 0xc7, 0x85, 0xd6, 0xe4, 0x45, 0xf8, 0x66, 0xe1, 0x2c, + 0xbd, 0x41, 0x6b, 0x24, 0x43, 0xe1, 0x61, 0x25, 0xd6, 0x71, 0x27, 0x47, 0x08, 0x7a, 0x54, 0x48, + 0x59, 0x21, 0x6d, 0xdd, 0xfd, 0x63, 0x4b, 0x06, 0xb4, 0x30, 0x6a, 0xd9, 0x8b, 0x06, 0xad, 0x36, + 0x12, 0xe1, 0x0c, 0x78, 0x98, 0xc6, 0x8b, 0x77, 0x3e, 0x01, 0x27, 0xad, 0x32, 0x9e, 0x6c, 0xbc, + 0xc8, 0x36, 0xc9, 0x59, 0x29, 0xda, 0xa2, 0xd2, 0x4b, 0x16, 0x75, 0x6c, 0xab, 0x63, 0x2f, 0x6a, + 0x06, 0xfd, 0x08, 0xb9, 0xf8, 0x3c, 0x6c, 0x37, 0x6e, 0x8f, 0x7a, 0xf7, 0xac, 0x66, 0x62, 0x36, + 0x88, 0x73, 0xde, 0x2c, 0x49, 0xc5, 0x26, 0xfb, 0x68, 0x24, 0xe7, 0x3b, 0x3c, 0x19, 0xb0, 0xa1, + 0x93, 0xf4, 0xea, 0x57, 0x80, 0x57, 0xf0, 0x53, 0x9a, 0x54, 0x22, 0xf4, 0x6a, 0x47, 0xe8, 0x6f, + 0xad, 0xf3, 0xb4, 0xe4, 0x95, 0xbb, 0x6c, 0x93, 0x34, 0x7f, 0x94, 0x20, 0xa7, 0x30, 0x17, 0xe9, + 0xf5, 0x9e, 0x08, 0xce, 0x09, 0xac, 0xd6, 0xc0, 0xeb, 0xd7, 0x10, 0x8c, 0xf3, 0xd3, 0x5d, 0x0b, + 0x61, 0x91, 0xb9, 0xe4, 0x64, 0x47, 0xf7, 0x02, 0xf7, 0xa0, 0x07, 0x60, 0x97, 0x4a, 0x56, 0xd1, + 0x9d, 0xda, 0xe8, 0xa1, 0xc0, 0xfc, 0xb8, 0x84, 0x0c, 0x05, 0x3e, 0x3d, 0xbf, 0x3d, 0xa2, 0xf2, + 0xd3, 0xc5, 0x98, 0x49, 
0x9a, 0xf3, 0x6f, 0x9f, 0x2e, 0x48, 0x72, 0x6b, 0xe7, 0xa1, 0x18, 0x0b, + 0x5b, 0xfe, 0xf7, 0x73, 0x1b, 0x1f, 0x84, 0x7f, 0x0f, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x80, + 0x7f, 0xc1, 0x6a, 0xea, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto new file mode 100644 index 000000000000..8746192b7a87 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/annotations.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_swagger.options; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"; + +import "protoc-gen-swagger/options/openapiv2.proto"; +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.FileOptions { + // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Swagger openapiv2_swagger = 1042; +} +extend google.protobuf.MethodOptions { + // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Operation openapiv2_operation = 1042; +} +extend google.protobuf.MessageOptions { + // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Schema openapiv2_schema = 1042; +} +extend google.protobuf.ServiceOptions { + // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Tag openapiv2_tag = 1042; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go new file mode 100644 index 000000000000..7564c52e5009 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.pb.go @@ -0,0 +1,739 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: protoc-gen-swagger/options/openapiv2.proto + +/* +Package options is a generated protocol buffer package. + +It is generated from these files: + protoc-gen-swagger/options/openapiv2.proto + protoc-gen-swagger/options/annotations.proto + +It has these top-level messages: + Swagger + Operation + Info + Contact + ExternalDocumentation + Schema + JSONSchema + Tag +*/ +package options + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Swagger_SwaggerScheme int32 + +const ( + Swagger_UNKNOWN Swagger_SwaggerScheme = 0 + Swagger_HTTP Swagger_SwaggerScheme = 1 + Swagger_HTTPS Swagger_SwaggerScheme = 2 + Swagger_WS Swagger_SwaggerScheme = 3 + Swagger_WSS Swagger_SwaggerScheme = 4 +) + +var Swagger_SwaggerScheme_name = map[int32]string{ + 0: "UNKNOWN", + 1: "HTTP", + 2: "HTTPS", + 3: "WS", + 4: "WSS", +} +var Swagger_SwaggerScheme_value = map[string]int32{ + "UNKNOWN": 0, + "HTTP": 1, + "HTTPS": 2, + "WS": 3, + "WSS": 4, +} + +func (x Swagger_SwaggerScheme) String() string { + return proto.EnumName(Swagger_SwaggerScheme_name, int32(x)) +} +func (Swagger_SwaggerScheme) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type JSONSchema_JSONSchemaSimpleTypes int32 + +const ( + JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0 + JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1 + JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2 + JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3 + JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4 + JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5 + JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6 + JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7 +) + +var JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARRAY", + 2: "BOOLEAN", + 3: "INTEGER", + 4: "NULL", + 5: "NUMBER", + 6: "OBJECT", + 7: "STRING", +} +var JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{ + "UNKNOWN": 0, + "ARRAY": 1, + "BOOLEAN": 2, + "INTEGER": 3, + "NULL": 4, + "NUMBER": 5, + "OBJECT": 6, + "STRING": 7, +} + +func (x JSONSchema_JSONSchemaSimpleTypes) String() string { + return proto.EnumName(JSONSchema_JSONSchemaSimpleTypes_name, int32(x)) +} +func (JSONSchema_JSONSchemaSimpleTypes) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{6, 0} +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// TODO(ivucica): document fields +type Swagger struct { + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + Schemes []Swagger_SwaggerScheme `protobuf:"varint,5,rep,packed,name=schemes,enum=grpc.gateway.protoc_gen_swagger.options.Swagger_SwaggerScheme" json:"schemes,omitempty"` + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` +} + +func (m *Swagger) Reset() { *m = Swagger{} } +func (m *Swagger) String() string { return proto.CompactTextString(m) } +func (*Swagger) ProtoMessage() {} +func (*Swagger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Swagger) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Swagger) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Swagger) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Swagger) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Swagger) GetSchemes() []Swagger_SwaggerScheme { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Swagger) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Swagger) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Swagger) GetExternalDocs() *ExternalDocumentation { + if m != nil { + return m.ExternalDocs + } + return nil +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// TODO(ivucica): document fields +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocumentation { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + return m.Deprecated + } + return false +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// TODO(ivucica): document fields +type Info struct { + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,4,opt,name=contact" json:"contact,omitempty"` + Version string `protobuf:"bytes,6,opt,name=version" json:"version,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// TODO(ivucica): document fields +type Contact struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// TODO(ivucica): document fields +type ExternalDocumentation struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` +} + +func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } +func (m *ExternalDocumentation) String() string { return proto.CompactTextString(m) } +func (*ExternalDocumentation) ProtoMessage() {} +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExternalDocumentation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocumentation) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. 
+// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// TODO(ivucica): document fields +type Schema struct { + JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema" json:"json_schema,omitempty"` + Discriminator string `protobuf:"bytes,2,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *google_protobuf.Any `protobuf:"bytes,6,opt,name=example" json:"example,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Schema) GetJsonSchema() *JSONSchema { + if m != nil { + return m.JsonSchema + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *Schema) GetExternalDocs() *ExternalDocumentation { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *google_protobuf.Any { + if m != nil { + return m.Example + } + return nil +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// TODO(ivucica): document fields +type JSONSchema struct { + Title string `protobuf:"bytes,5,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + Default string `protobuf:"bytes,7,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties" 
json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,26,rep,name=required" json:"required,omitempty"` + // Items in 'array' must be unique. + Array []string `protobuf:"bytes,34,rep,name=array" json:"array,omitempty"` + Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,enum=grpc.gateway.protoc_gen_swagger.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"` +} + +func (m *JSONSchema) Reset() { *m = JSONSchema{} } +func (m *JSONSchema) String() string { return proto.CompactTextString(m) } +func (*JSONSchema) ProtoMessage() {} +func (*JSONSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *JSONSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *JSONSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *JSONSchema) GetDefault() string { + if m != nil { + return m.Default + } + return "" +} + +func (m *JSONSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *JSONSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *JSONSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *JSONSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *JSONSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *JSONSchema) GetMaxLength() uint64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *JSONSchema) GetMinLength() uint64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *JSONSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *JSONSchema) GetMaxItems() uint64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *JSONSchema) GetMinItems() uint64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *JSONSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *JSONSchema) GetMaxProperties() uint64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *JSONSchema) GetMinProperties() uint64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *JSONSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *JSONSchema) GetArray() []string { + if m != nil { + return m.Array + } + return nil +} + +func (m *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes { + if m != nil { + return m.Type + } + return nil +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +// +// TODO(ivucica): document fields +type Tag struct { + // TODO(ivucica): Description should be extracted from comments on the proto + // service object. 
+ Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocumentation { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func init() { + proto.RegisterType((*Swagger)(nil), "grpc.gateway.protoc_gen_swagger.options.Swagger") + proto.RegisterType((*Operation)(nil), "grpc.gateway.protoc_gen_swagger.options.Operation") + proto.RegisterType((*Info)(nil), "grpc.gateway.protoc_gen_swagger.options.Info") + proto.RegisterType((*Contact)(nil), "grpc.gateway.protoc_gen_swagger.options.Contact") + proto.RegisterType((*ExternalDocumentation)(nil), "grpc.gateway.protoc_gen_swagger.options.ExternalDocumentation") + proto.RegisterType((*Schema)(nil), "grpc.gateway.protoc_gen_swagger.options.Schema") + proto.RegisterType((*JSONSchema)(nil), "grpc.gateway.protoc_gen_swagger.options.JSONSchema") + proto.RegisterType((*Tag)(nil), "grpc.gateway.protoc_gen_swagger.options.Tag") + proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.Swagger_SwaggerScheme", Swagger_SwaggerScheme_name, Swagger_SwaggerScheme_value) + proto.RegisterEnum("grpc.gateway.protoc_gen_swagger.options.JSONSchema_JSONSchemaSimpleTypes", JSONSchema_JSONSchemaSimpleTypes_name, JSONSchema_JSONSchemaSimpleTypes_value) +} + +func init() { proto.RegisterFile("protoc-gen-swagger/options/openapiv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xed, 0x6e, 0xdb, 0x36, + 0x17, 0x7e, 0x65, 0xd3, 0x36, 0x7d, 0x6c, 0xe7, 0x65, 0xd5, 0x74, 0x63, 0xd3, 0x8f, 0xb9, 0x5e, + 0x87, 0x19, 0x2d, 0xe2, 0x0c, 0xe9, 0xff, 0x01, 0x49, 0x67, 0x74, 0x51, 0x53, 0xbb, 0x90, 0x5d, + 0x74, 0x1b, 0x30, 0x18, 0x8c, 0x4c, 0x3b, 0x1c, 0x24, 0x4a, 0x95, 0xa8, 0xd4, 0xbe, 0x8d, 0x5d, + 0xcf, 0x2e, 0x63, 0xf7, 0xb0, 0x3b, 0xd8, 0x8f, 0xfd, 0x1a, 0x48, 0x51, 0xf9, 0x72, 0x37, 0x04, + 0x45, 0xf7, 0xcb, 0x3c, 0xcf, 0x73, 0xce, 0x23, 0x9e, 0x0f, 0x92, 0x86, 0x27, 0x49, 0x1a, 0xab, + 0x38, 0xd8, 0x5d, 0x72, 0xb9, 0x9b, 0xbd, 0x67, 0xcb, 0x25, 0x4f, 0xf7, 0xe2, 0x44, 0x89, 0x58, + 0x66, 0x7b, 0x71, 0xc2, 0x25, 0x4b, 0xc4, 0xd9, 0xfe, 0xc0, 0x38, 0xb9, 0x5f, 0x2f, 0xd3, 0x24, + 0x18, 0x2c, 0x99, 0xe2, 0xef, 0xd9, 0xba, 0xc0, 0x82, 0xd9, 0x92, 0xcb, 0x99, 0x0d, 0x1c, 0xd8, + 0xc0, 0x9d, 0xbb, 0xcb, 0x38, 0x5e, 0x86, 0x7c, 0xcf, 0xb8, 0x9c, 0xe4, 0x8b, 0x3d, 0x26, 0xad, + 0x7f, 0xef, 0xcf, 0x2a, 0x34, 0x26, 0x85, 0xbb, 0x4b, 0xa1, 0x61, 0x23, 0xa9, 0xd3, 0x75, 0xfa, + 0x4d, 0xbf, 0x34, 0xdd, 0x03, 0x40, 0x42, 0x2e, 0x62, 0x5a, 0xe9, 0x3a, 0xfd, 0xd6, 0xfe, 0xee, + 0xe0, 0x86, 0x1f, 0x1e, 0x1c, 0xc9, 0x45, 0xec, 0x9b, 0x50, 0xd7, 0x05, 0x74, 0x1a, 0x67, 0x8a, + 0x56, 0x8d, 0xb2, 0x59, 0xbb, 0xf7, 0xa0, 0x79, 0xc2, 0x32, 0x3e, 0x4b, 0x98, 0x3a, 0xa5, 0xc8, + 0x10, 0x58, 0x03, 0xaf, 0x99, 0x3a, 0x75, 0x7f, 0x80, 0x46, 0x16, 0x9c, 0xf2, 0x88, 0x67, 0xb4, + 0xd6, 0xad, 0xf6, 0xb7, 0xf6, 0xbf, 0xbd, 0xf1, 0x67, 0x6d, 0x42, 0xe5, 0xef, 0xc4, 0xc8, 0xf8, + 0xa5, 0x9c, 0xbb, 0x03, 0x38, 0x88, 0x65, 0x96, 0x6b, 0xe9, 
0x7a, 0xb7, 0xaa, 0xbf, 0x5a, 0xda, + 0x9a, 0x4b, 0xd2, 0x78, 0x9e, 0x07, 0x3c, 0xa3, 0x8d, 0x82, 0x2b, 0x6d, 0x37, 0x80, 0x0e, 0x5f, + 0x29, 0x9e, 0x4a, 0x16, 0xce, 0xe6, 0x71, 0x90, 0xd1, 0x2d, 0x53, 0x8e, 0x9b, 0xef, 0x6b, 0x68, + 0xa3, 0xbf, 0x8b, 0x83, 0x3c, 0xe2, 0x52, 0x31, 0x0d, 0xfb, 0x6d, 0x7e, 0x01, 0x67, 0xbd, 0x43, + 0xe8, 0x5c, 0xd9, 0xb6, 0xdb, 0x82, 0xc6, 0x9b, 0xd1, 0xcb, 0xd1, 0xf8, 0xed, 0x88, 0xfc, 0xcf, + 0xc5, 0x80, 0xbe, 0x9f, 0x4e, 0x5f, 0x13, 0xc7, 0x6d, 0x42, 0x4d, 0xaf, 0x26, 0xa4, 0xe2, 0xd6, + 0xa1, 0xf2, 0x76, 0x42, 0xaa, 0x6e, 0x03, 0xaa, 0x6f, 0x27, 0x13, 0x82, 0x3c, 0x84, 0x31, 0x69, + 0x7a, 0x08, 0x37, 0x09, 0x78, 0x08, 0x03, 0x69, 0x79, 0x08, 0xb7, 0x48, 0xdb, 0x43, 0xb8, 0x4d, + 0x3a, 0x1e, 0xc2, 0x1d, 0xb2, 0xd5, 0xfb, 0xa3, 0x02, 0xcd, 0x71, 0xc2, 0x53, 0xb3, 0x07, 0xdd, + 0x1d, 0xc5, 0x96, 0x19, 0x75, 0x4c, 0xca, 0x66, 0x6d, 0xc6, 0x21, 0x8f, 0x22, 0x96, 0xae, 0x4d, + 0xdf, 0xf5, 0x38, 0x14, 0xa6, 0xdb, 0x85, 0xd6, 0x9c, 0x67, 0x41, 0x2a, 0x4c, 0x5e, 0xb6, 0xa5, + 0x97, 0xa1, 0xcd, 0x52, 0xa1, 0x4f, 0x5f, 0x2a, 0xf7, 0x11, 0xb4, 0xe3, 0x32, 0x83, 0x99, 0x98, + 0xd3, 0x5a, 0xb1, 0x8f, 0x73, 0xec, 0x68, 0xfe, 0xd1, 0xad, 0xa6, 0x17, 0xc3, 0x07, 0x86, 0x3a, + 0x1f, 0x9e, 0x87, 0x00, 0x73, 0x9e, 0xa4, 0x3c, 0x60, 0x8a, 0xcf, 0x69, 0xab, 0xeb, 0xf4, 0xb1, + 0x7f, 0x09, 0xb9, 0x56, 0xfb, 0x36, 0xe9, 0xf4, 0x7e, 0x77, 0x00, 0xe9, 0x83, 0xe0, 0x6e, 0x43, + 0x4d, 0x09, 0x15, 0x72, 0x7b, 0xba, 0x0a, 0xe3, 0x7a, 0x31, 0x2b, 0x9b, 0xc5, 0xec, 0x03, 0x51, + 0x3c, 0x8d, 0xb2, 0x59, 0xbc, 0x98, 0x65, 0x3c, 0x3d, 0x13, 0x01, 0xb7, 0x35, 0xdf, 0x32, 0xf8, + 0x78, 0x31, 0x29, 0x50, 0xd7, 0x83, 0x46, 0x10, 0x4b, 0xc5, 0x02, 0x65, 0x0b, 0xfe, 0xcd, 0x8d, + 0x0b, 0xfe, 0xbc, 0x88, 0xf3, 0x4b, 0x01, 0x5d, 0x82, 0x33, 0x9e, 0x66, 0x7a, 0x4f, 0xf5, 0xa2, + 0xfd, 0xd6, 0xf4, 0x10, 0xae, 0x91, 0x7a, 0x6f, 0x08, 0x0d, 0x1b, 0xa3, 0xa7, 0x47, 0xb2, 0xa8, + 0xcc, 0xcb, 0xac, 0x5d, 0x02, 0xd5, 0x3c, 0x0d, 0x6d, 0x3a, 0x7a, 0xa9, 0xd3, 0xe7, 0x11, 0x13, + 0xa1, 0xdd, 0x7b, 0x61, 0xf4, 0x5e, 0xc2, 0x9d, 0x0f, 0xf6, 0xfa, 0x7a, 0x5d, 0x9c, 0xcd, 0xba, + 0x6c, 0x7c, 0xa2, 0xf7, 0x5b, 0x05, 0xea, 0xe6, 0xd8, 0x30, 0x77, 0x0a, 0xad, 0x5f, 0xb2, 0x58, + 0xce, 0x4c, 0xdf, 0x98, 0x09, 0x6f, 0xed, 0x3f, 0xbb, 0x71, 0x39, 0xbc, 0xc9, 0x78, 0x54, 0x28, + 0xf9, 0xa0, 0x75, 0xac, 0xea, 0x63, 0xe8, 0xcc, 0x85, 0xde, 0x41, 0x24, 0x24, 0x53, 0x71, 0x6a, + 0x3f, 0x7e, 0x15, 0xd4, 0xf7, 0x5a, 0xca, 0xd9, 0x7c, 0x16, 0xcb, 0x70, 0x6d, 0xb2, 0xc5, 0x3e, + 0xd6, 0xc0, 0x58, 0x86, 0xeb, 0xcd, 0xa3, 0x51, 0xfb, 0x0f, 0x8e, 0xc6, 0x00, 0x1a, 0x7c, 0xc5, + 0xa2, 0x24, 0xe4, 0xa6, 0x79, 0xad, 0xfd, 0xed, 0x41, 0xf1, 0x06, 0x0c, 0xca, 0x37, 0x60, 0x70, + 0x20, 0xd7, 0x7e, 0xe9, 0xe4, 0x21, 0x8c, 0x48, 0xad, 0xf7, 0x57, 0x1d, 0xe0, 0x22, 0xf1, 0x8b, + 0x79, 0xad, 0xfd, 0xcb, 0xbc, 0xd6, 0x37, 0xfb, 0x42, 0xa1, 0x31, 0xe7, 0x0b, 0x96, 0x87, 0x8a, + 0x36, 0x8a, 0xc9, 0xb1, 0xa6, 0xfb, 0x05, 0xb4, 0xa2, 0x3c, 0x54, 0x22, 0x09, 0xf9, 0x2c, 0x5e, + 0x50, 0xe8, 0x3a, 0x7d, 0xc7, 0x87, 0x12, 0x1a, 0x2f, 0x74, 0x68, 0xc4, 0x56, 0x22, 0xca, 0x23, + 0x73, 0xb4, 0x1c, 0xbf, 0x34, 0xdd, 0xa7, 0x70, 0x8b, 0xaf, 0x82, 0x30, 0xcf, 0xc4, 0x19, 0x9f, + 0x95, 0x3e, 0x6d, 0x53, 0x5b, 0x72, 0x4e, 0xbc, 0xb2, 0xce, 0x5a, 0x46, 0x48, 0xe3, 0xd2, 0xb1, + 0x32, 0x85, 0x79, 0x4d, 0xc6, 0xfa, 0x6c, 0x5d, 0x97, 0xb1, 0xce, 0x0f, 0x00, 0x22, 0xb6, 0x9a, + 0x85, 0x5c, 0x2e, 0xd5, 0x29, 0xfd, 0x7f, 0xd7, 0xe9, 0x23, 0xbf, 0x19, 0xb1, 0xd5, 0xb1, 0x01, + 0x0c, 0x2d, 0x64, 0x49, 0x13, 0x4b, 0x0b, 0x69, 0x69, 0x0a, 0x8d, 0x84, 0x29, 0xdd, 
0x14, 0x7a, + 0xab, 0x28, 0x83, 0x35, 0xf5, 0x7c, 0x68, 0x5d, 0xa1, 0x78, 0x94, 0xd1, 0x6d, 0x13, 0x87, 0x23, + 0xb6, 0x3a, 0xd2, 0xb6, 0x21, 0x85, 0xb4, 0xe4, 0x1d, 0x4b, 0x0a, 0x59, 0x90, 0x8f, 0xa0, 0x9d, + 0x4b, 0xf1, 0x2e, 0xe7, 0x96, 0xff, 0xcc, 0xec, 0xbc, 0x55, 0x60, 0x85, 0xcb, 0x57, 0xb0, 0xa5, + 0xc5, 0x93, 0x54, 0xdf, 0x83, 0x4a, 0xf0, 0x8c, 0x52, 0x23, 0xd2, 0x89, 0xd8, 0xea, 0xf5, 0x39, + 0x68, 0xdc, 0x84, 0xbc, 0xec, 0x76, 0xd7, 0xba, 0x09, 0x79, 0xc9, 0x6d, 0x07, 0x70, 0xca, 0xdf, + 0xe5, 0x22, 0xe5, 0x73, 0xba, 0x53, 0x5c, 0x92, 0xa5, 0xad, 0xe7, 0x83, 0xa5, 0x29, 0x5b, 0xd3, + 0x9e, 0x21, 0x0a, 0xc3, 0xfd, 0x19, 0x90, 0x5a, 0x27, 0x9c, 0x7e, 0x69, 0x1e, 0xed, 0xa3, 0x8f, + 0x38, 0x71, 0x97, 0x96, 0x13, 0xa1, 0xc7, 0x73, 0xba, 0x4e, 0x78, 0xe6, 0x1b, 0xd9, 0xde, 0x7b, + 0xb8, 0xf3, 0x41, 0xfa, 0xea, 0x3b, 0xd9, 0x84, 0xda, 0x81, 0xef, 0x1f, 0xfc, 0x48, 0x1c, 0x8d, + 0x1f, 0x8e, 0xc7, 0xc7, 0xc3, 0x83, 0x11, 0xa9, 0x68, 0xe3, 0x68, 0x34, 0x1d, 0xbe, 0x18, 0xfa, + 0xa4, 0xaa, 0x1f, 0xd3, 0xd1, 0x9b, 0xe3, 0x63, 0x82, 0x5c, 0x80, 0xfa, 0xe8, 0xcd, 0xab, 0xc3, + 0xa1, 0x4f, 0x6a, 0x7a, 0x3d, 0x3e, 0xf4, 0x86, 0xcf, 0xa7, 0xa4, 0xae, 0xd7, 0x93, 0xa9, 0x7f, + 0x34, 0x7a, 0x41, 0x1a, 0x1e, 0xc2, 0x0e, 0xa9, 0x78, 0x08, 0x57, 0x48, 0xd5, 0x43, 0xb8, 0x6a, + 0x9e, 0x59, 0x44, 0x6a, 0xd7, 0x2e, 0x7c, 0x97, 0xdc, 0xf6, 0x10, 0xbe, 0x4d, 0xb6, 0x3d, 0x84, + 0x3f, 0x27, 0xd4, 0x43, 0xf8, 0x1e, 0xb9, 0xef, 0x21, 0x7c, 0x9f, 0x3c, 0xf0, 0x10, 0x7e, 0x40, + 0x1e, 0x7a, 0x08, 0x3f, 0x24, 0x3d, 0x0f, 0xe1, 0xc7, 0xe4, 0x89, 0x87, 0xf0, 0x13, 0xf2, 0xd4, + 0x43, 0xf8, 0x29, 0x19, 0xf4, 0x7e, 0x75, 0xa0, 0x3a, 0x65, 0xcb, 0x1b, 0xbc, 0x07, 0x1b, 0x37, + 0x48, 0xf5, 0xd3, 0xdf, 0x20, 0x45, 0xba, 0x87, 0xcf, 0x7f, 0x3a, 0x58, 0x0a, 0x75, 0x9a, 0x9f, + 0x0c, 0x82, 0x38, 0xda, 0xd3, 0xfa, 0xbb, 0x3c, 0x88, 0xb3, 0x75, 0xa6, 0xb8, 0x35, 0xed, 0xe7, + 0xf6, 0xfe, 0xf9, 0x7f, 0xeb, 0x49, 0xdd, 0x70, 0xcf, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf8, + 0x2b, 0xe7, 0x47, 0xdc, 0x0a, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto new file mode 100644 index 000000000000..6c1854613dd4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/openapiv2.proto @@ -0,0 +1,223 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_swagger.options; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"; + +import "google/protobuf/any.proto"; + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject +// +// TODO(ivucica): document fields +message Swagger { + string swagger = 1; + Info info = 2; + string host = 3; + string base_path = 4; + enum SwaggerScheme { + UNKNOWN = 0; + HTTP = 1; + HTTPS = 2; + WS = 3; + WSS = 4; + } + repeated SwaggerScheme schemes = 5; + repeated string consumes = 6; + repeated string produces = 7; + // field 8 is reserved for 'paths'. + reserved 8; + // field 9 is reserved for 'definitions', which at this time are already + // exposed as and customizable as proto messages. + reserved 9; + // field 10 is reserved for 'responses'. + reserved 10; + // field 11 is reserved for 'securityDefinitions'. + reserved 11; + // field 12 is reserved for repeated 'security'. 
+ reserved 12; + // field 13 is reserved for 'tags', which are supposed to be exposed as and + // customizable as proto services. TODO(ivucica): add processing of proto + // service objects into OpenAPI v2 Tag objects. + reserved 13; + ExternalDocumentation external_docs = 14; +} + +// `Operation` is a representation of OpenAPI v2 specification's Operation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject +// +// TODO(ivucica): document fields +message Operation { + repeated string tags = 1; + string summary = 2; + string description = 3; + ExternalDocumentation external_docs = 4; + string operation_id = 5; + repeated string consumes = 6; + repeated string produces = 7; + // field 8 is reserved for 'parameters'. + reserved 8; + // field 9 is reserved for 'responses'. + reserved 9; + repeated string schemes = 10; + bool deprecated = 11; + // field 12 is reserved for 'security'. + reserved 12; + +} + +// `Info` is a representation of OpenAPI v2 specification's Info object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject +// +// TODO(ivucica): document fields +message Info { + string title = 1; + string description = 2; + string terms_of_service = 3; + Contact contact = 4; + // field 5 is reserved for 'license'. + reserved 5; + string version = 6; +} + +// `Contact` is a representation of OpenAPI v2 specification's Contact object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject +// +// TODO(ivucica): document fields +message Contact { + string name = 1; + string url = 2; + string email = 3; +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// TODO(ivucica): document fields +message ExternalDocumentation { + string description = 1; + string url = 2; +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// TODO(ivucica): document fields +message Schema { + JSONSchema json_schema = 1; + string discriminator = 2; + bool read_only = 3; + // field 4 is reserved for 'xml'. + reserved 4; + ExternalDocumentation external_docs = 5; + google.protobuf.Any example = 6; +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, +// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json +// +// TODO(ivucica): document fields +message JSONSchema { + // field 1 is reserved for '$id', omitted from OpenAPI v2. + reserved 1; + // field 2 is reserved for '$schema', omitted from OpenAPI v2. + reserved 2; + // field 3 is reserved for '$ref', although it is unclear how it would be used. + reserved 3; + // field 4 is reserved for '$comment', omitted from OpenAPI v2. + reserved 4; + string title = 5; + string description = 6; + string default = 7; + // field 8 is reserved for 'readOnly', which has an OpenAPI v2-specific meaning and is defined there. + reserved 8; + // field 9 is reserved for 'examples', which is omitted from OpenAPI v2 in favor of 'example' field. 
+ reserved 9; + double multiple_of = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + uint64 max_length = 15; + uint64 min_length = 16; + string pattern = 17; + // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2. + reserved 18; + // field 19 is reserved for 'items', but in OpenAPI-specific way. TODO(ivucica): add 'items'? + reserved 19; + uint64 max_items = 20; + uint64 min_items = 21; + bool unique_items = 22; + // field 23 is reserved for 'contains', omitted from OpenAPI v2. + reserved 23; + uint64 max_properties = 24; + uint64 min_properties = 25; + repeated string required = 26; + // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific way. TODO(ivucica): add 'additionalProperties'? + reserved 27; + // field 28 is reserved for 'definitions', omitted from OpenAPI v2. + reserved 28; + // field 29 is reserved for 'properties', but in OpenAPI-specific way. TODO(ivucica): add 'additionalProperties'? + reserved 29; + // following fields are reserved, as the properties have been omitted from OpenAPI v2: + // patternProperties, dependencies, propertyNames, const + reserved 30 to 33; + // Items in 'array' must be unique. + repeated string array = 34; + + enum JSONSchemaSimpleTypes { + UNKNOWN = 0; + ARRAY = 1; + BOOLEAN = 2; + INTEGER = 3; + NULL = 4; + NUMBER = 5; + OBJECT = 6; + STRING = 7; + } + + repeated JSONSchemaSimpleTypes type = 35; + // following fields are reserved, as the properties have been omitted from OpenAPI v2: + // format, contentMediaType, contentEncoding, if, then, else + reserved 36 to 41; + // field 42 is reserved for 'allOf', but in OpenAPI-specific way. TODO(ivucica): add 'allOf'? + reserved 42; + // following fields are reserved, as the properties have been omitted from OpenAPI v2: + // anyOf, oneOf, not + reserved 43 to 45; +} + +// `Tag` is a representation of OpenAPI v2 specification's Tag object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject +// +// TODO(ivucica): document fields +message Tag { + // field 1 is reserved for 'name'. In our generator, this is (to be) extracted + // from the name of proto service, and thus not exposed to the user, as + // changing tag object's name would break the link to the references to the + // tag in individual operation specifications. + // + // TODO(ivucica): Add 'name' property. Use it to allow override of the name of + // global Tag object, then use that name to reference the tag throughout the + // Swagger file. + reserved 1; + // TODO(ivucica): Description should be extracted from comments on the proto + // service object. 
+ string description = 2; + ExternalDocumentation external_docs = 3; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go index f248c738b232..6e0eb27e2858 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -9,18 +9,23 @@ import ( "time" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) -// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to -// gRPC metadata for incoming requests processed by grpc-gateway +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields. +const MetadataPrefix = "grpcgateway-" + // MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to // HTTP headers in a response handled by grpc-gateway const MetadataTrailerPrefix = "Grpc-Trailer-" + const metadataGrpcTimeout = "Grpc-Timeout" const xForwardedFor = "X-Forwarded-For" @@ -39,25 +44,25 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) { +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { var err error timeout, err = timeoutDecode(tm) if err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) } } for key, vals := range req.Header { for _, val := range vals { - if key == "Authorization" { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if strings.ToLower(key) == "authorization" { pairs = append(pairs, "authorization", val) - continue } - if strings.HasPrefix(key, MetadataHeaderPrefix) { - pairs = append(pairs, key[len(MetadataHeaderPrefix):], val) + if h, ok := mux.incomingHeaderMatcher(key); ok { + pairs = append(pairs, h, val) } } } @@ -85,7 +90,11 @@ func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, e if len(pairs) == 0 { return ctx, nil } - return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil + md := metadata.Pairs(pairs...) + if mux.metadataAnnotator != nil { + md = metadata.Join(md, mux.metadataAnnotator(ctx, req)) + } + return metadata.NewOutgoingContext(ctx, md), nil } // ServerMetadata consists of metadata sent from gRPC server. @@ -141,3 +150,38 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { } return } + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. 
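// Illustrative sketch (not taken from the vendored diff): how the new
// DefaultHeaderMatcher, defined in runtime/mux.go later in this patch, decides
// which incoming HTTP headers reach gRPC metadata. Only the patched
// github.com/grpc-ecosystem/grpc-gateway/runtime package is assumed.
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	for _, key := range []string{"Accept", "Authorization", "Grpc-Metadata-Foo-Bar", "X-Custom"} {
		if h, ok := runtime.DefaultHeaderMatcher(key); ok {
			// Permanent IANA headers gain the "grpcgateway-" prefix;
			// "Grpc-Metadata-*" headers are forwarded with that prefix stripped.
			fmt.Printf("%s -> %s\n", key, h)
		} else {
			// Everything else is dropped. Note that AnnotateContext additionally
			// passes "authorization" through unprefixed for backwards compatibility.
			fmt.Printf("%s -> (not forwarded)\n", key)
		}
	}
}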
+// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context_test.go index abc1873fad9b..955d6f135788 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context_test.go @@ -23,12 +23,12 @@ func TestAnnotateContext_WorksWithEmpty(t *testing.T) { t.Fatalf("http.NewRequest(%q, %q, nil) failed with %v; want success", "GET", "http://www.example.com", err) } request.Header.Add("Some-Irrelevant-Header", "some value") - annotated, err := runtime.AnnotateContext(ctx, request) + annotated, err := runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return } - md, ok := metadata.FromContext(annotated) + md, ok := metadata.FromOutgoingContext(annotated) if !ok || len(md) != emptyForwardMetaCount { t.Errorf("Expected %d metadata items in context; got %v", emptyForwardMetaCount, md) } @@ -45,20 +45,23 @@ func TestAnnotateContext_ForwardsGrpcMetadata(t *testing.T) { request.Header.Add("Grpc-Metadata-Foo-BAZ", "Value2") request.Header.Add("Grpc-Metadata-foo-bAz", "Value3") request.Header.Add("Authorization", "Token 1234567890") - annotated, err := runtime.AnnotateContext(ctx, request) + annotated, err := runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return } - md, ok := metadata.FromContext(annotated) - if got, want := len(md), emptyForwardMetaCount+3; !ok || got != want { - t.Errorf("Expected %d metadata items in context; got %d", got, want) + md, ok := metadata.FromOutgoingContext(annotated) + if got, want := len(md), emptyForwardMetaCount+4; !ok || got != want { + t.Errorf("metadata items in context = %d want %d: %v", got, want, md) } if got, want := md["foobar"], []string{"Value1"}; !reflect.DeepEqual(got, want) { - t.Errorf(`md["foobar"] = %q; want %q`, got, want) + t.Errorf(`md["grpcgateway-foobar"] = %q; want %q`, got, want) } if got, want := md["foo-baz"], []string{"Value2", "Value3"}; !reflect.DeepEqual(got, want) { - t.Errorf(`md["foo-baz"] = %q want %q`, got, want) + t.Errorf(`md["grpcgateway-foo-baz"] = %q want %q`, got, want) + } + if got, want := md["grpcgateway-authorization"], []string{"Token 1234567890"}; !reflect.DeepEqual(got, want) { + t.Errorf(`md["grpcgateway-authorization"] = %q want %q`, got, want) } if got, want := md["authorization"], []string{"Token 1234567890"}; !reflect.DeepEqual(got, want) { t.Errorf(`md["authorization"] = %q want %q`, got, want) @@ -74,12 +77,12 @@ func TestAnnotateContext_XForwardedFor(t *testing.T) { request.Header.Add("X-Forwarded-For", "192.0.2.100") // client request.RemoteAddr = "192.0.2.200:12345" // proxy - annotated, err := runtime.AnnotateContext(ctx, request) + annotated, err := 
runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return } - md, ok := metadata.FromContext(annotated) + md, ok := metadata.FromOutgoingContext(annotated) if !ok || len(md) != emptyForwardMetaCount+1 { t.Errorf("Expected %d metadata items in context; got %v", emptyForwardMetaCount+1, md) } @@ -98,7 +101,7 @@ func TestAnnotateContext_SupportsTimeouts(t *testing.T) { if err != nil { t.Fatalf(`http.NewRequest("GET", "http://example.com", nil failed with %v; want success`, err) } - annotated, err := runtime.AnnotateContext(ctx, request) + annotated, err := runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return @@ -110,7 +113,7 @@ func TestAnnotateContext_SupportsTimeouts(t *testing.T) { const acceptableError = 50 * time.Millisecond runtime.DefaultContextTimeout = 10 * time.Second - annotated, err = runtime.AnnotateContext(ctx, request) + annotated, err = runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return @@ -153,7 +156,7 @@ func TestAnnotateContext_SupportsTimeouts(t *testing.T) { }, } { request.Header.Set("Grpc-Timeout", spec.timeout) - annotated, err = runtime.AnnotateContext(ctx, request) + annotated, err = runtime.AnnotateContext(ctx, runtime.NewServeMux(), request) if err != nil { t.Errorf("runtime.AnnotateContext(ctx, %#v) failed with %v; want success", request, err) return diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go index 0d3cb3bf3caf..8eebdcf49f49 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -6,9 +6,9 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. @@ -64,7 +64,7 @@ var ( type errorBody struct { Error string `protobuf:"bytes,1,name=error" json:"error"` - Code int `protobuf:"bytes,2,name=code" json:"code"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` } //Make this also conform to proto.Message for builtin JSONPb Marshaler @@ -78,14 +78,20 @@ func (*errorBody) ProtoMessage() {} // // The response body returned by this function is a JSON object, // which contains a member whose key is "error" and whose value is err.Error(). 
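// Illustrative sketch (not taken from the vendored diff): with the reworked
// DefaultHTTPError below, both the HTTP status and the JSON error body are
// derived via the status package instead of the deprecated grpc.Code and
// grpc.ErrorDesc helpers. Only the patched runtime package is assumed.
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "no such resource")

	// Errors that are not status errors fall back to codes.Unknown inside
	// DefaultHTTPError; here FromError succeeds.
	s, _ := status.FromError(err)
	fmt.Println(runtime.HTTPStatusFromCode(s.Code())) // 404
	fmt.Println(s.Code(), s.Message())                // NotFound no such resource
}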
-func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { const fallback = `{"error": "failed to marshal error message"}` w.Header().Del("Trailer") w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + body := &errorBody{ - Error: grpc.ErrorDesc(err), - Code: int(grpc.Code(err)), + Error: s.Message(), + Code: int32(s.Code()), } buf, merr := marshaler.Marshal(body) @@ -103,9 +109,9 @@ func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseW grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(grpc.Code(err)) + st := HTTPStatusFromCode(s.Code()) w.WriteHeader(st) if _, err := w.Write(buf); err != nil { grpclog.Printf("Failed to write response: %v", err) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors_test.go index 2bdfca637c1f..ee5dfb3c0372 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors_test.go @@ -10,8 +10,8 @@ import ( "github.com/grpc-ecosystem/grpc-gateway/runtime" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestDefaultHTTPError(t *testing.T) { @@ -28,14 +28,14 @@ func TestDefaultHTTPError(t *testing.T) { msg: "example error", }, { - err: grpc.Errorf(codes.NotFound, "no such resource"), + err: status.Error(codes.NotFound, "no such resource"), status: http.StatusNotFound, msg: "no such resource", }, } { w := httptest.NewRecorder() req, _ := http.NewRequest("", "", nil) // Pass in an empty request to match the signature - runtime.DefaultHTTPError(ctx, &runtime.JSONBuiltin{}, w, req, spec.err) + runtime.DefaultHTTPError(ctx, &runtime.ServeMux{}, &runtime.JSONBuiltin{}, w, req, spec.err) if got, want := w.Header().Get("Content-Type"), "application/json"; got != want { t.Errorf(`w.Header().Get("Content-Type") = %q; want %q; on spec.err=%v`, got, want, spec.err) @@ -49,6 +49,7 @@ func TestDefaultHTTPError(t *testing.T) { t.Errorf("json.Unmarshal(%q, &body) failed with %v; want success", w.Body.Bytes(), err) continue } + if got, want := body["error"].(string), spec.msg; !strings.Contains(got, want) { t.Errorf(`body["error"] = %q; want %q; on spec.err=%v`, got, want, spec.err) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go index d7040851ae9c..ae6a5d551cfb 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -9,12 +9,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime/internal" "golang.org/x/net/context" - "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) // ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) if !ok { grpclog.Printf("Flush not supported in %T", w) @@ -28,7 +29,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp http.Error(w, "unexpected error", http.StatusInternalServerError) return } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Content-Type", marshaler.ContentType()) @@ -57,7 +58,7 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp grpclog.Printf("Failed to marshal response chunk: %v", err) return } - if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil { + if _, err = w.Write(buf); err != nil { grpclog.Printf("Failed to send response chunk: %v", err) return } @@ -65,11 +66,12 @@ func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.Resp } } -func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) { +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { for k, vs := range md.HeaderMD { - hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k) - for i := range vs { - w.Header().Add(hKey, vs[i]) + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } } } } @@ -84,31 +86,31 @@ func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { for k, vs := range md.TrailerMD { tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for i := range vs { - w.Header().Add(tKey, vs[i]) + for _, v := range vs { + w.Header().Add(tKey, v) } } } // ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
-func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { grpclog.Printf("Failed to extract ServerMetadata from context") } - handleForwardResponseServerMetadata(w, md) + handleForwardResponseServerMetadata(w, mux, md) handleForwardResponseTrailerHeader(w, md) w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } buf, err := marshaler.Marshal(resp) if err != nil { grpclog.Printf("Marshal error: %v", err) - HTTPError(ctx, marshaler, w, req, err) + HTTPError(ctx, mux, marshaler, w, req, err) return } @@ -146,7 +148,10 @@ func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter func streamChunk(result proto.Message, err error) map[string]proto.Message { if err != nil { - grpcCode := grpc.Code(err) + grpcCode := codes.Unknown + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + } httpCode := HTTPStatusFromCode(grpcCode) return map[string]proto.Message{ "error": &internal.StreamError{ diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler_test.go new file mode 100644 index 000000000000..8c61ee1510a2 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler_test.go @@ -0,0 +1,67 @@ +package runtime_test + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/grpc-ecosystem/grpc-gateway/examples/examplepb" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "golang.org/x/net/context" +) + +func TestForwardResponseStream(t *testing.T) { + var ( + msgs = []proto.Message{ + &pb.SimpleMessage{Id: "One"}, + &pb.SimpleMessage{Id: "Two"}, + } + + ctx = runtime.NewServerMetadataContext( + context.Background(), runtime.ServerMetadata{}, + ) + mux = runtime.NewServeMux() + marshaler = &runtime.JSONPb{} + req = httptest.NewRequest("GET", "http://example.com/foo", nil) + resp = httptest.NewRecorder() + count = 0 + recv = func() (proto.Message, error) { + if count >= len(msgs) { + return nil, io.EOF + } + count++ + return msgs[count-1], nil + } + ) + + runtime.ForwardResponseStream(ctx, mux, marshaler, resp, req, recv) + + w := resp.Result() + if w.StatusCode != http.StatusOK { + t.Errorf(" got %d want %d", w.StatusCode, http.StatusOK) + } + if h := w.Header.Get("Transfer-Encoding"); h != "chunked" { + t.Errorf("ForwardResponseStream missing header chunked") + } + body, err := ioutil.ReadAll(w.Body) + if err != nil { + t.Errorf("Failed to read response body with %v", err) + } + w.Body.Close() + + var want []byte + for _, msg := range msgs { + b, err := marshaler.Marshal(map[string]proto.Message{"result": msg}) + if err != nil { + t.Errorf("marshaler.Marshal() failed %v", err) + } + want = append(want, b...) 
+ } + + if string(body) != string(want) { + t.Errorf("ForwardResponseStream() = \"%s\" want \"%s\"", body, want) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go index 6f837cfd5d95..44550f393b4c 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: runtime/internal/stream_chunk.proto -// DO NOT EDIT! /* Package internal is a generated protocol buffer package. @@ -42,6 +41,34 @@ func (m *StreamError) String() string { return proto.CompactTextStrin func (*StreamError) ProtoMessage() {} func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + func init() { proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") } @@ -50,7 +77,7 @@ func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDesc var fileDescriptor0 = []byte{ // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, 0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1, 0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c, 0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6, diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb_test.go index 01e7ce872516..482a45bfd719 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb_test.go @@ -19,7 +19,13 @@ import ( func TestJSONPbMarshal(t *testing.T) { msg := examplepb.ABitOfEverything{ - Uuid: "6EC2446F-7E89-4127-B3E6-5C05E6BECBA7", + SingleNested: &examplepb.ABitOfEverything_Nested{}, + RepeatedStringValue: []string{}, + MappedStringValue: map[string]string{}, + MappedNestedValue: map[string]*examplepb.ABitOfEverything_Nested{}, + RepeatedEnumValue: []examplepb.NumericEnum{}, + TimestampValue: ×tamp.Timestamp{}, + Uuid: "6EC2446F-7E89-4127-B3E6-5C05E6BECBA7", Nested: []*examplepb.ABitOfEverything_Nested{ { Name: "foo", @@ -37,7 +43,7 @@ func TestJSONPbMarshal(t *testing.T) { }, } - for _, spec := range []struct { + for i, spec := range []struct { enumsAsInts, emitDefaults bool indent string origName bool @@ -105,7 +111,7 @@ func TestJSONPbMarshal(t *testing.T) { t.Errorf("jsonpb.UnmarshalString(%q, &got) failed with %v; want success; spec=%v", string(buf), err, spec) } if want := msg; !reflect.DeepEqual(got, want) { - t.Errorf("got = %v; want %v; spec=%v", &got, &want, spec) + t.Errorf("case %d: got = %v; want %v; spec=%v", i, 
&got, &want, spec) } if spec.verifier != nil { spec.verifier(string(buf)) @@ -143,7 +149,7 @@ func TestJSONPbUnmarshal(t *testing.T) { m runtime.JSONPb got examplepb.ABitOfEverything ) - for _, data := range []string{ + for i, data := range []string{ `{ "uuid": "6EC2446F-7E89-4127-B3E6-5C05E6BECBA7", "nested": [ @@ -185,7 +191,7 @@ func TestJSONPbUnmarshal(t *testing.T) { }`, } { if err := m.Unmarshal([]byte(data), &got); err != nil { - t.Errorf("m.Unmarshal(%q, &got) failed with %v; want success", data, err) + t.Errorf("case %d: m.Unmarshal(%q, &got) failed with %v; want success", i, data, err) } want := examplepb.ABitOfEverything{ @@ -208,7 +214,7 @@ func TestJSONPbUnmarshal(t *testing.T) { } if !reflect.DeepEqual(got, want) { - t.Errorf("got = %v; want = %v", &got, &want) + t.Errorf("case %d: got = %v; want = %v", i, &got, &want) } } } @@ -232,7 +238,13 @@ func TestJSONPbUnmarshalFields(t *testing.T) { func TestJSONPbEncoder(t *testing.T) { msg := examplepb.ABitOfEverything{ - Uuid: "6EC2446F-7E89-4127-B3E6-5C05E6BECBA7", + SingleNested: &examplepb.ABitOfEverything_Nested{}, + RepeatedStringValue: []string{}, + MappedStringValue: map[string]string{}, + MappedNestedValue: map[string]*examplepb.ABitOfEverything_Nested{}, + RepeatedEnumValue: []examplepb.NumericEnum{}, + TimestampValue: ×tamp.Timestamp{}, + Uuid: "6EC2446F-7E89-4127-B3E6-5C05E6BECBA7", Nested: []*examplepb.ABitOfEverything_Nested{ { Name: "foo", @@ -249,7 +261,7 @@ func TestJSONPbEncoder(t *testing.T) { }, } - for _, spec := range []struct { + for i, spec := range []struct { enumsAsInts, emitDefaults bool indent string origName bool @@ -319,7 +331,7 @@ func TestJSONPbEncoder(t *testing.T) { t.Errorf("jsonpb.UnmarshalString(%q, &got) failed with %v; want success; spec=%v", buf.String(), err, spec) } if want := msg; !reflect.DeepEqual(got, want) { - t.Errorf("got = %v; want %v; spec=%v", &got, &want, spec) + t.Errorf("case %d: got = %v; want %v; spec=%v", i, &got, &want, spec) } if spec.verifier != nil { spec.verifier(buf.String()) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 2e6c56213029..205bc4309214 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -1,12 +1,16 @@ package runtime import ( + "fmt" "net/http" + "net/textproto" "strings" - "golang.org/x/net/context" - "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // A HandlerFunc handles a specific pair of path pattern and HTTP method. @@ -19,6 +23,10 @@ type ServeMux struct { handlers map[string][]handler forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotator func(context.Context, *http.Request) metadata.MD + protoErrorHandler ProtoErrorHandlerFunc } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -36,6 +44,64 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. } } +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. 
This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotator = annotator + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ @@ -47,6 +113,29 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { for _, opt := range opts { opt(serveMux) } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. 
+ OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + return serveMux } @@ -57,9 +146,17 @@ func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { // ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + path := r.URL.Path if !strings.HasPrefix(path, "/") { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } return } @@ -67,7 +164,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { l := len(components) var verb string if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } return } else if idx > 0 { c := components[l-1] @@ -77,7 +180,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) { r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } } @@ -104,17 +213,36 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // X-HTTP-Method-Override is optional. Always allow fallback to POST. 
if isPathLengthFallback(r) { if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } return } h.h(w, r, pathParams) return } - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } return } } - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } } // GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go index 3947dbea023a..8a9ec2cdae40 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -21,7 +21,7 @@ type op struct { operand int } -// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto. +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. type Pattern struct { // ops is a list of operations ops []op diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 000000000000..b1b089273b6d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,61 @@ +package runtime + +import ( + "io" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
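// Illustrative sketch (not taken from the vendored diff): wiring the ServeMux
// options introduced in this patch (WithIncomingHeaderMatcher, WithMetadata,
// WithProtoErrorHandler). The annotateSession helper and the ":8080" address
// are hypothetical; only the patched runtime package is assumed.
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// annotateSession copies a request cookie into outgoing gRPC metadata
// (hypothetical example of a WithMetadata annotator).
func annotateSession(ctx context.Context, req *http.Request) metadata.MD {
	if c, err := req.Cookie("session"); err == nil {
		return metadata.Pairs("session", c.Value)
	}
	return nil
}

func main() {
	mux := runtime.NewServeMux(
		runtime.WithIncomingHeaderMatcher(runtime.DefaultHeaderMatcher),
		runtime.WithMetadata(annotateSession),
		// Replaces HTTPError/OtherErrorHandler with the proto-status error handler.
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	log.Fatal(http.ListenAndServe(":8080", mux))
}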
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + w.Header().Del("Trailer") + w.Header().Set("Content-Type", marshaler.ContentType()) + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Printf("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Printf("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Printf("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go index 56a919a52f1c..c00e0b914e27 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -4,7 +4,9 @@ import ( "fmt" "net/url" "reflect" + "strconv" "strings" + "time" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/utilities" @@ -38,31 +40,39 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] if m.Kind() != reflect.Ptr { return fmt.Errorf("unexpected type %T: %v", msg, msg) } + var props *proto.Properties m = m.Elem() for i, fieldName := range fieldPath { isLast := i == len(fieldPath)-1 if !isLast && m.Kind() != reflect.Struct { return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) } - f := fieldByProtoName(m, fieldName) - if !f.IsValid() { + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) return nil } switch f.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } m = f case reflect.Slice: // TODO(yugui) Support []byte if !isLast { return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) } - return populateRepeatedField(f, values) + return populateRepeatedField(f, values, props) case reflect.Ptr: if f.IsNil() { m = reflect.New(f.Type().Elem()) - f.Set(m) + f.Set(m.Convert(f.Type())) } m = f.Elem() continue @@ -80,39 +90,127 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values [] default: grpclog.Printf("too many field values: %s", strings.Join(fieldPath, ".")) } - return populateField(m, values[0]) + return populateField(m, values[0], props) } // fieldByProtoName looks up a field whose corresponding protobuf field name is "name". // "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) reflect.Value { +func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + if op, ok := props.OneofTypes[name]; ok { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + for _, p := range props.Prop { if p.OrigName == name { - return m.FieldByName(p.Name) + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil } } - return reflect.Value{} + return reflect.Value{}, nil, nil } -func populateRepeatedField(f reflect.Value, values []string) error { +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + conv, ok := convFromType[elemType.Kind()] if !ok { return fmt.Errorf("unsupported field type %s", elemType) } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values))) + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) for i, v := range values { result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) if err := result[1].Interface(); err != nil { return err.(error) } - f.Index(i).Set(result[0]) + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) } return nil } -func populateField(f reflect.Value, value string) error { +func populateField(f reflect.Value, value string, props *proto.Properties) error { + // Handle well known type + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := f.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "Timestamp": + if value == "null" { + f.Field(0).SetInt(0) + f.Field(1).SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.Field(0).SetInt(int64(t.Unix())) + f.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.Field(0).SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + f.Field(0).SetBool(true) + } else if value == "false" { + f.Field(0).SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.Field(0).SetString(value) + return nil + } + } + + // is the destination field an enumeration type? 
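// Illustrative sketch (not taken from the vendored diff): the query-parameter
// population rewritten in runtime/query.go in this patch now understands enum
// names, google.protobuf wrapper types and RFC 3339 timestamps. The message
// passed in would be a generated proto type with such fields (hypothetical
// field names below); only the patched runtime and utilities packages are assumed.
package queryexample

import (
	"net/url"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func populateFromQuery(msg proto.Message) error {
	values := url.Values{
		"enum_value":         {"Y"},                    // enum by declared name (or by number, e.g. "1")
		"timestamp_value":    {"2016-12-15T12:23:32Z"}, // google.protobuf.Timestamp
		"wrapper_bool_value": {"true"},                 // google.protobuf.BoolValue
	}
	// An empty DoubleArray filter means no fields are filtered out.
	return runtime.PopulateQueryParameters(msg, values, utilities.NewDoubleArray(nil))
}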
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + conv, ok := convFromType[f.Kind()] if !ok { return fmt.Errorf("unsupported field type %T", f) @@ -121,7 +219,48 @@ func populateField(f reflect.Value, value string) error { if err := result[1].Interface(); err != nil { return err.(error) } - f.Set(result[0]) + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } return nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query_test.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query_test.go index cf2d4285616c..95a72a4af1c5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query_test.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query_test.go @@ -1,43 +1,139 @@ package runtime_test import ( + "errors" + "fmt" "net/url" + "reflect" "testing" + "time" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" ) func TestPopulateParameters(t *testing.T) { + timeT := time.Date(2016, time.December, 15, 12, 23, 32, 49, time.UTC) + timeStr := timeT.Format(time.RFC3339Nano) + timePb, err := ptypes.TimestampProto(timeT) + if err != nil { + t.Fatalf("Couldn't setup timestamp in Protobuf format: %v", err) + } + for _, spec := range []struct { - values url.Values - filter *utilities.DoubleArray - want proto.Message + values url.Values + filter *utilities.DoubleArray + want proto.Message + wanterr error }{ { values: url.Values{ - "float_value": {"1.5"}, - "double_value": {"2.5"}, - "int64_value": {"-1"}, - "int32_value": {"-2"}, - "uint64_value": {"3"}, - "uint32_value": {"4"}, - "bool_value": {"true"}, - "string_value": {"str"}, - "repeated_value": {"a", "b", "c"}, + "float_value": {"1.5"}, + "double_value": {"2.5"}, + "int64_value": {"-1"}, + "int32_value": {"-2"}, + "uint64_value": {"3"}, + "uint32_value": {"4"}, + "bool_value": {"true"}, + "string_value": {"str"}, + "repeated_value": {"a", "b", "c"}, + "enum_value": {"1"}, + "repeated_enum": {"1", "2", "0"}, + "timestamp_value": {timeStr}, + "wrapper_float_value": {"1.5"}, + "wrapper_double_value": {"2.5"}, + "wrapper_int64_value": 
{"-1"}, + "wrapper_int32_value": {"-2"}, + "wrapper_u_int64_value": {"3"}, + "wrapper_u_int32_value": {"4"}, + "wrapper_bool_value": {"true"}, + "wrapper_string_value": {"str"}, }, filter: utilities.NewDoubleArray(nil), want: &proto3Message{ - FloatValue: 1.5, - DoubleValue: 2.5, - Int64Value: -1, - Int32Value: -2, - Uint64Value: 3, - Uint32Value: 4, - BoolValue: true, - StringValue: "str", - RepeatedValue: []string{"a", "b", "c"}, + FloatValue: 1.5, + DoubleValue: 2.5, + Int64Value: -1, + Int32Value: -2, + Uint64Value: 3, + Uint32Value: 4, + BoolValue: true, + StringValue: "str", + RepeatedValue: []string{"a", "b", "c"}, + EnumValue: EnumValue_Y, + RepeatedEnum: []EnumValue{EnumValue_Y, EnumValue_Z, EnumValue_X}, + TimestampValue: timePb, + WrapperFloatValue: &wrappers.FloatValue{1.5}, + WrapperDoubleValue: &wrappers.DoubleValue{2.5}, + WrapperInt64Value: &wrappers.Int64Value{-1}, + WrapperInt32Value: &wrappers.Int32Value{-2}, + WrapperUInt64Value: &wrappers.UInt64Value{3}, + WrapperUInt32Value: &wrappers.UInt32Value{4}, + WrapperBoolValue: &wrappers.BoolValue{true}, + WrapperStringValue: &wrappers.StringValue{"str"}, + }, + }, + { + values: url.Values{ + "floatValue": {"1.5"}, + "doubleValue": {"2.5"}, + "int64Value": {"-1"}, + "int32Value": {"-2"}, + "uint64Value": {"3"}, + "uint32Value": {"4"}, + "boolValue": {"true"}, + "stringValue": {"str"}, + "repeatedValue": {"a", "b", "c"}, + "enumValue": {"1"}, + "repeatedEnum": {"1", "2", "0"}, + "timestampValue": {timeStr}, + "wrapperFloatValue": {"1.5"}, + "wrapperDoubleValue": {"2.5"}, + "wrapperInt64Value": {"-1"}, + "wrapperInt32Value": {"-2"}, + "wrapperUInt64Value": {"3"}, + "wrapperUInt32Value": {"4"}, + "wrapperBoolValue": {"true"}, + "wrapperStringValue": {"str"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto3Message{ + FloatValue: 1.5, + DoubleValue: 2.5, + Int64Value: -1, + Int32Value: -2, + Uint64Value: 3, + Uint32Value: 4, + BoolValue: true, + StringValue: "str", + RepeatedValue: []string{"a", "b", "c"}, + EnumValue: EnumValue_Y, + RepeatedEnum: []EnumValue{EnumValue_Y, EnumValue_Z, EnumValue_X}, + TimestampValue: timePb, + WrapperFloatValue: &wrappers.FloatValue{1.5}, + WrapperDoubleValue: &wrappers.DoubleValue{2.5}, + WrapperInt64Value: &wrappers.Int64Value{-1}, + WrapperInt32Value: &wrappers.Int32Value{-2}, + WrapperUInt64Value: &wrappers.UInt64Value{3}, + WrapperUInt32Value: &wrappers.UInt32Value{4}, + WrapperBoolValue: &wrappers.BoolValue{true}, + WrapperStringValue: &wrappers.StringValue{"str"}, + }, + }, + { + values: url.Values{ + "enum_value": {"EnumValue_Z"}, + "repeated_enum": {"EnumValue_X", "2", "0"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto3Message{ + EnumValue: EnumValue_Z, + RepeatedEnum: []EnumValue{EnumValue_X, EnumValue_Z, EnumValue_X}, }, }, { @@ -51,6 +147,37 @@ func TestPopulateParameters(t *testing.T) { "bool_value": {"true"}, "string_value": {"str"}, "repeated_value": {"a", "b", "c"}, + "enum_value": {"1"}, + "repeated_enum": {"1", "2", "0"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto2Message{ + FloatValue: proto.Float32(1.5), + DoubleValue: proto.Float64(2.5), + Int64Value: proto.Int64(-1), + Int32Value: proto.Int32(-2), + Uint64Value: proto.Uint64(3), + Uint32Value: proto.Uint32(4), + BoolValue: proto.Bool(true), + StringValue: proto.String("str"), + RepeatedValue: []string{"a", "b", "c"}, + EnumValue: EnumValue_Y, + RepeatedEnum: []EnumValue{EnumValue_Y, EnumValue_Z, EnumValue_X}, + }, + }, + { + values: url.Values{ + "floatValue": {"1.5"}, + 
"doubleValue": {"2.5"}, + "int64Value": {"-1"}, + "int32Value": {"-2"}, + "uint64Value": {"3"}, + "uint32Value": {"4"}, + "boolValue": {"true"}, + "stringValue": {"str"}, + "repeatedValue": {"a", "b", "c"}, + "enumValue": {"1"}, + "repeatedEnum": {"1", "2", "0"}, }, filter: utilities.NewDoubleArray(nil), want: &proto2Message{ @@ -63,6 +190,8 @@ func TestPopulateParameters(t *testing.T) { BoolValue: proto.Bool(true), StringValue: proto.String("str"), RepeatedValue: []string{"a", "b", "c"}, + EnumValue: EnumValue_Y, + RepeatedEnum: []EnumValue{EnumValue_Y, EnumValue_Z, EnumValue_X}, }, }, { @@ -99,12 +228,47 @@ func TestPopulateParameters(t *testing.T) { Uint64Value: 1, }, }, + { + values: url.Values{ + "oneof_string_value": {"foobar"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto3Message{ + OneofValue: &proto3Message_OneofStringValue{"foobar"}, + }, + }, + { + values: url.Values{ + "oneof_bool_value": {"true"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto3Message{ + OneofValue: &proto3Message_OneofBoolValue{true}, + }, + }, + { + // Don't allow setting a oneof more than once + values: url.Values{ + "oneof_bool_value": {"true"}, + "oneof_string_value": {"foobar"}, + }, + filter: utilities.NewDoubleArray(nil), + want: &proto3Message{}, + wanterr: errors.New("field already set for oneof_value oneof"), + }, } { msg := proto.Clone(spec.want) msg.Reset() err := runtime.PopulateQueryParameters(msg, spec.values, spec.filter) + if spec.wanterr != nil { + if !reflect.DeepEqual(err, spec.wanterr) { + t.Errorf("runtime.PopulateQueryParameters(msg, %v, %v) failed with %v; want error %v", spec.values, spec.filter, err, spec.wanterr) + } + continue + } + if err != nil { - t.Errorf("runtime.PoplateQueryParameters(msg, %v, %v) failed with %v; want success", spec.values, spec.filter, err) + t.Errorf("runtime.PopulateQueryParameters(msg, %v, %v) failed with %v; want success", spec.values, spec.filter, err) continue } if got, want := msg, spec.want; !proto.Equal(got, want) { @@ -197,18 +361,129 @@ func TestPopulateParametersWithFilters(t *testing.T) { } } +func TestPopulateQueryParametersWithInvalidNestedParameters(t *testing.T) { + for _, spec := range []struct { + msg proto.Message + values url.Values + filter *utilities.DoubleArray + }{ + { + msg: &proto3Message{}, + values: url.Values{ + "float_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "double_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "int64_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "int32_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "uint64_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "uint32_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "bool_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "string_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "repeated_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + 
values: url.Values{ + "enum_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "enum_value.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + { + msg: &proto3Message{}, + values: url.Values{ + "repeated_enum.nested": {"test"}, + }, + filter: utilities.NewDoubleArray(nil), + }, + } { + spec.msg.Reset() + err := runtime.PopulateQueryParameters(spec.msg, spec.values, spec.filter) + if err == nil { + t.Errorf("runtime.PopulateQueryParameters(msg, %v, %v) did not fail; want error", spec.values, spec.filter) + } + } +} + type proto3Message struct { - Nested *proto2Message `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - NestedNonNull proto2Message `protobuf:"bytes,11,opt,name=nested_non_null" json:"nested_non_null,omitempty"` - FloatValue float32 `protobuf:"fixed32,2,opt,name=float_value" json:"float_value,omitempty"` - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` - Int64Value int64 `protobuf:"varint,4,opt,name=int64_value" json:"int64_value,omitempty"` - Int32Value int32 `protobuf:"varint,5,opt,name=int32_value" json:"int32_value,omitempty"` - Uint64Value uint64 `protobuf:"varint,6,opt,name=uint64_value" json:"uint64_value,omitempty"` - Uint32Value uint32 `protobuf:"varint,7,opt,name=uint32_value" json:"uint32_value,omitempty"` - BoolValue bool `protobuf:"varint,8,opt,name=bool_value" json:"bool_value,omitempty"` - StringValue string `protobuf:"bytes,9,opt,name=string_value" json:"string_value,omitempty"` - RepeatedValue []string `protobuf:"bytes,10,rep,name=repeated_value" json:"repeated_value,omitempty"` + Nested *proto2Message `protobuf:"bytes,1,opt,name=nested,json=nested" json:"nested,omitempty"` + NestedNonNull proto2Message `protobuf:"bytes,15,opt,name=nested_non_null,json=nestedNonNull" json:"nested_non_null,omitempty"` + FloatValue float32 `protobuf:"fixed32,2,opt,name=float_value,json=floatValue" json:"float_value,omitempty"` + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + Int64Value int64 `protobuf:"varint,4,opt,name=int64_value,json=int64Value" json:"int64_value,omitempty"` + Int32Value int32 `protobuf:"varint,5,opt,name=int32_value,json=int32Value" json:"int32_value,omitempty"` + Uint64Value uint64 `protobuf:"varint,6,opt,name=uint64_value,json=uint64Value" json:"uint64_value,omitempty"` + Uint32Value uint32 `protobuf:"varint,7,opt,name=uint32_value,json=uint32Value" json:"uint32_value,omitempty"` + BoolValue bool `protobuf:"varint,8,opt,name=bool_value,json=boolValue" json:"bool_value,omitempty"` + StringValue string `protobuf:"bytes,9,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + RepeatedValue []string `protobuf:"bytes,10,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` + EnumValue EnumValue `protobuf:"varint,11,opt,name=enum_value,json=enumValue,enum=runtime_test_api.EnumValue" json:"enum_value,omitempty"` + RepeatedEnum []EnumValue `protobuf:"varint,12,rep,packed,name=repeated_enum,json=repeatedEnum,enum=runtime_test_api.EnumValue" json:"repeated_enum,omitempty"` + TimestampValue *timestamp.Timestamp `protobuf:"bytes,16,opt,name=timestamp_value,json=timestampValue" json:"timestamp_value,omitempty"` + OneofValue proto3Message_OneofValue `protobuf_oneof:"oneof_value"` + WrapperDoubleValue *wrappers.DoubleValue `protobuf:"bytes,17,opt,name=wrapper_double_value,json=wrapperDoubleValue" 
json:"wrapper_double_value,omitempty"` + WrapperFloatValue *wrappers.FloatValue `protobuf:"bytes,18,opt,name=wrapper_float_value,json=wrapperFloatValue" json:"wrapper_float_value,omitempty"` + WrapperInt64Value *wrappers.Int64Value `protobuf:"bytes,19,opt,name=wrapper_int64_value,json=wrapperInt64Value" json:"wrapper_int64_value,omitempty"` + WrapperInt32Value *wrappers.Int32Value `protobuf:"bytes,20,opt,name=wrapper_int32_value,json=wrapperInt32Value" json:"wrapper_int32_value,omitempty"` + WrapperUInt64Value *wrappers.UInt64Value `protobuf:"bytes,21,opt,name=wrapper_u_int64_value,json=wrapperUInt64Value" json:"wrapper_u_int64_value,omitempty"` + WrapperUInt32Value *wrappers.UInt32Value `protobuf:"bytes,22,opt,name=wrapper_u_int32_value,json=wrapperUInt32Value" json:"wrapper_u_int32_value,omitempty"` + WrapperBoolValue *wrappers.BoolValue `protobuf:"bytes,23,opt,name=wrapper_bool_value,json=wrapperBoolValue" json:"wrapper_bool_value,omitempty"` + WrapperStringValue *wrappers.StringValue `protobuf:"bytes,24,opt,name=wrapper_string_value,json=wrapperStringValue" json:"wrapper_string_value,omitempty"` } func (m *proto3Message) Reset() { *m = proto3Message{} } @@ -222,17 +497,123 @@ func (m *proto3Message) GetNested() *proto2Message { return nil } +type proto3Message_OneofValue interface { + proto3Message_OneofValue() +} + +type proto3Message_OneofBoolValue struct { + OneofBoolValue bool `protobuf:"varint,13,opt,name=oneof_bool_value,json=oneofBoolValue,oneof"` +} +type proto3Message_OneofStringValue struct { + OneofStringValue string `protobuf:"bytes,14,opt,name=oneof_string_value,json=oneofStringValue,oneof"` +} + +func (*proto3Message_OneofBoolValue) proto3Message_OneofValue() {} +func (*proto3Message_OneofStringValue) proto3Message_OneofValue() {} + +func (m *proto3Message) GetOneofValue() proto3Message_OneofValue { + if m != nil { + return m.OneofValue + } + return nil +} + +func (m *proto3Message) GetOneofBoolValue() bool { + if x, ok := m.GetOneofValue().(*proto3Message_OneofBoolValue); ok { + return x.OneofBoolValue + } + return false +} + +func (m *proto3Message) GetOneofStringValue() string { + if x, ok := m.GetOneofValue().(*proto3Message_OneofStringValue); ok { + return x.OneofStringValue + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*proto3Message) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _proto3Message_OneofMarshaler, _proto3Message_OneofUnmarshaler, _proto3Message_OneofSizer, []interface{}{ + (*proto3Message_OneofBoolValue)(nil), + (*proto3Message_OneofStringValue)(nil), + } +} + +func _proto3Message_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*proto3Message) + // oneof_value + switch x := m.OneofValue.(type) { + case *proto3Message_OneofBoolValue: + t := uint64(0) + if x.OneofBoolValue { + t = 1 + } + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *proto3Message_OneofStringValue: + b.EncodeVarint(14<<3 | proto.WireBytes) + b.EncodeStringBytes(x.OneofStringValue) + case nil: + default: + return fmt.Errorf("proto3Message.OneofValue has unexpected type %T", x) + } + return nil +} + +func _proto3Message_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*proto3Message) + switch tag { + case 14: // oneof_value.oneof_bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofValue = &proto3Message_OneofBoolValue{x != 0} + return true, err + case 15: // oneof_value.oneof_string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.OneofValue = &proto3Message_OneofStringValue{x} + return true, err + default: + return false, nil + } +} + +func _proto3Message_OneofSizer(msg proto.Message) (n int) { + m := msg.(*proto3Message) + // oneof_value + switch x := m.OneofValue.(type) { + case *proto3Message_OneofBoolValue: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += 1 + case *proto3Message_OneofStringValue: + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.OneofStringValue))) + n += len(x.OneofStringValue) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type proto2Message struct { - Nested *proto3Message `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - FloatValue *float32 `protobuf:"fixed32,2,opt,name=float_value" json:"float_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"` - Int64Value *int64 `protobuf:"varint,4,opt,name=int64_value" json:"int64_value,omitempty"` - Int32Value *int32 `protobuf:"varint,5,opt,name=int32_value" json:"int32_value,omitempty"` - Uint64Value *uint64 `protobuf:"varint,6,opt,name=uint64_value" json:"uint64_value,omitempty"` - Uint32Value *uint32 `protobuf:"varint,7,opt,name=uint32_value" json:"uint32_value,omitempty"` - BoolValue *bool `protobuf:"varint,8,opt,name=bool_value" json:"bool_value,omitempty"` - StringValue *string `protobuf:"bytes,9,opt,name=string_value" json:"string_value,omitempty"` - RepeatedValue []string `protobuf:"bytes,10,rep,name=repeated_value" json:"repeated_value,omitempty"` + Nested *proto3Message `protobuf:"bytes,1,opt,name=nested,json=nested" json:"nested,omitempty"` + FloatValue *float32 `protobuf:"fixed32,2,opt,name=float_value,json=floatValue" json:"float_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + Int64Value *int64 `protobuf:"varint,4,opt,name=int64_value,json=int64Value" json:"int64_value,omitempty"` + Int32Value 
*int32 `protobuf:"varint,5,opt,name=int32_value,json=int32Value" json:"int32_value,omitempty"` + Uint64Value *uint64 `protobuf:"varint,6,opt,name=uint64_value,json=uint64Value" json:"uint64_value,omitempty"` + Uint32Value *uint32 `protobuf:"varint,7,opt,name=uint32_value,json=uint32Value" json:"uint32_value,omitempty"` + BoolValue *bool `protobuf:"varint,8,opt,name=bool_value,json=boolValue" json:"bool_value,omitempty"` + StringValue *string `protobuf:"bytes,9,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + RepeatedValue []string `protobuf:"bytes,10,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` + EnumValue EnumValue `protobuf:"varint,11,opt,name=enum_value,json=enumValue,enum=runtime_test_api.EnumValue" json:"enum_value,omitempty"` + RepeatedEnum []EnumValue `protobuf:"varint,12,rep,packed,name=repeated_enum,json=repeatedEnum,enum=runtime_test_api.EnumValue" json:"repeated_enum,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -309,3 +690,26 @@ func (m *proto2Message) GetRepeatedValue() []string { } return nil } + +type EnumValue int32 + +const ( + EnumValue_X EnumValue = 0 + EnumValue_Y EnumValue = 1 + EnumValue_Z EnumValue = 2 +) + +var EnumValue_name = map[int32]string{ + 0: "EnumValue_X", + 1: "EnumValue_Y", + 2: "EnumValue_Z", +} +var EnumValue_value = map[string]int32{ + "EnumValue_X": 0, + "EnumValue_Y": 1, + "EnumValue_Z": 2, +} + +func init() { + proto.RegisterEnum("runtime_test_api.EnumValue", EnumValue_name, EnumValue_value) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/README.grcp-gateway b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/README.grpc-gateway similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/README.grcp-gateway rename to vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/README.grpc-gateway diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go deleted file mode 100644 index 2409a86c4bff..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.pb.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/api/annotations.proto -// DO NOT EDIT! - -/* -Package google_api is a generated protocol buffer package. - -It is generated from these files: - google/api/annotations.proto - google/api/http.proto - -It has these top-level messages: - HttpRule - CustomHttpPattern -*/ -package google_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_Http = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MethodOptions)(nil), - ExtensionType: (*HttpRule)(nil), - Field: 72295728, - Name: "google.api.http", - Tag: "bytes,72295728,opt,name=http", -} - -func init() { - proto.RegisterExtension(E_Http) -} - -func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0x27, 0x15, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x4c, 0x62, 0x03, 0x5b, 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, - 0xff, 0x4f, 0xd1, 0x89, 0x83, 0xde, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.proto index cbd18b847f39..85c361b47fed 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/annotations.proto @@ -19,9 +19,11 @@ package google.api; import "google/api/http.proto"; import "google/protobuf/descriptor.proto"; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "AnnotationsProto"; option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; extend google.protobuf.MethodOptions { // See `HttpRule`. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go deleted file mode 100644 index c572235923ca..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.pb.go +++ /dev/null @@ -1,360 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/api/http.proto -// DO NOT EDIT! - -package google_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API -// methods. The mapping determines what portions of the request message are -// populated from the path, query parameters, or body of the HTTP request. The -// mapping is typically specified as an `google.api.http` annotation, see -// "google/api/annotations.proto" for details. 
-// -// The mapping consists of a mandatory field specifying a path template and an -// optional `body` field specifying what data is represented in the HTTP request -// body. The field name for the path indicates the HTTP method. Example: -// -// ``` -// package google.storage.v2; -// -// import "google/api/annotations.proto"; -// -// service Storage { -// rpc CreateObject(CreateObjectRequest) returns (Object) { -// option (google.api.http) { -// post: "/v2/{bucket_name=buckets/*}/objects" -// body: "object" -// }; -// }; -// } -// ``` -// -// Here `bucket_name` and `object` bind to fields of the request message -// `CreateObjectRequest`. -// -// The rules for mapping HTTP path, query parameters, and body fields -// to the request message are as follows: -// -// 1. The `body` field specifies either `*` or a field path, or is -// omitted. If omitted, it assumes there is no HTTP body. -// 2. Leaf fields (recursive expansion of nested messages in the -// request) can be classified into three types: -// (a) Matched in the URL template. -// (b) Covered by body (if body is `*`, everything except (a) fields; -// else everything under the body field) -// (c) All other fields. -// 3. URL query parameters found in the HTTP request are mapped to (c) fields. -// 4. Any body sent with an HTTP request can contain only (b) fields. -// -// The syntax of the path template is as follows: -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// `*` matches a single path component, `**` zero or more path components, and -// `LITERAL` a constant. A `Variable` can match an entire path as specified -// again by a template; this nested template must not contain further variables. -// If no template is given with a variable, it matches a single path component. -// The notation `{var}` is henceforth equivalent to `{var=*}`. -// -// Use CustomHttpPattern to specify any HTTP method that is not included in the -// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for -// a given URL path rule. The wild-card rule is useful for services that provide -// content to Web (HTML) clients. -type HttpRule struct { - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - // - // Types that are valid to be assigned to Pattern: - // *HttpRule_Get - // *HttpRule_Put - // *HttpRule_Post - // *HttpRule_Delete - // *HttpRule_Patch - // *HttpRule_Custom - Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` - // The name of the request field whose value is mapped to the HTTP body, or - // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. - Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"` - // Additional HTTP bindings for the selector. Nested bindings must not - // specify a selector and must not contain additional bindings. 
- AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"` -} - -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -type isHttpRule_Pattern interface { - isHttpRule_Pattern() -} - -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,oneof"` -} -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,oneof"` -} -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,oneof"` -} -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,oneof"` -} -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,oneof"` -} -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"` -} - -func (*HttpRule_Get) isHttpRule_Pattern() {} -func (*HttpRule_Put) isHttpRule_Pattern() {} -func (*HttpRule_Post) isHttpRule_Pattern() {} -func (*HttpRule_Delete) isHttpRule_Pattern() {} -func (*HttpRule_Patch) isHttpRule_Pattern() {} -func (*HttpRule_Custom) isHttpRule_Pattern() {} - -func (m *HttpRule) GetPattern() isHttpRule_Pattern { - if m != nil { - return m.Pattern - } - return nil -} - -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { - return x.Get - } - return "" -} - -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { - return x.Put - } - return "" -} - -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { - return x.Post - } - return "" -} - -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { - return x.Delete - } - return "" -} - -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { - return x.Patch - } - return "" -} - -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { - return x.Custom - } - return nil -} - -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } -} - -func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - b.EncodeVarint(2<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Get) - case *HttpRule_Put: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Put) - case *HttpRule_Post: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Post) - case *HttpRule_Delete: - b.EncodeVarint(5<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Delete) - case *HttpRule_Patch: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Patch) - case *HttpRule_Custom: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Custom); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) - } - return nil -} - -func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*HttpRule) - switch tag { - case 2: // pattern.get - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Get{x} - return true, err - case 3: // pattern.put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Put{x} - return true, err - case 4: // pattern.post - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Post{x} - return true, err - case 5: // pattern.delete - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Delete{x} - return true, err - case 6: // pattern.patch - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Pattern = &HttpRule_Patch{x} - return true, err - case 8: // pattern.custom - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(CustomHttpPattern) - err := b.DecodeMessage(msg) - m.Pattern = &HttpRule_Custom{msg} - return true, err - default: - return false, nil - } -} - -func _HttpRule_OneofSizer(msg proto.Message) (n int) { - m := msg.(*HttpRule) - // pattern - switch x := m.Pattern.(type) { - case *HttpRule_Get: - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Get))) - n += len(x.Get) - case *HttpRule_Put: - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Put))) - n += len(x.Put) - case *HttpRule_Post: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Post))) - n += len(x.Post) - case *HttpRule_Delete: - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Delete))) - n += len(x.Delete) - case *HttpRule_Patch: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Patch))) - n += len(x.Patch) - case *HttpRule_Custom: - s := proto.Size(x.Custom) - n += 
proto.SizeVarint(8<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A custom pattern is used for defining custom HTTP verb. -type CustomHttpPattern struct { - // The name of this custom HTTP verb. - Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` -} - -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -func init() { - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x6a, 0xbb, 0x40, - 0x10, 0xc6, 0xff, 0x46, 0x63, 0xe2, 0x08, 0x7f, 0xe8, 0x36, 0x2d, 0x7b, 0x09, 0x48, 0x4e, 0x9e, - 0x0c, 0xa4, 0x87, 0x1e, 0x7a, 0xb3, 0x14, 0x72, 0x0c, 0xbe, 0x40, 0x59, 0xdd, 0x45, 0x97, 0x1a, - 0x77, 0x88, 0xe3, 0xa1, 0x8f, 0xd2, 0xb7, 0x2d, 0xbb, 0x6b, 0x48, 0xa0, 0xb7, 0xf9, 0x7e, 0x33, - 0x7e, 0xdf, 0xb8, 0x03, 0x4f, 0xad, 0x31, 0x6d, 0xaf, 0xf6, 0x02, 0xf5, 0xbe, 0x23, 0xc2, 0x02, - 0x2f, 0x86, 0x0c, 0x03, 0x8f, 0x0b, 0x81, 0x7a, 0xf7, 0xb3, 0x80, 0xf5, 0x91, 0x08, 0xab, 0xa9, - 0x57, 0x8c, 0x41, 0xd8, 0x2a, 0xe2, 0x8b, 0x2c, 0xc8, 0x93, 0xe3, 0xbf, 0xca, 0x0a, 0xcb, 0x70, - 0x22, 0x1e, 0x5e, 0x19, 0x4e, 0xc4, 0x36, 0x10, 0xa1, 0x19, 0x89, 0x47, 0x33, 0x74, 0x8a, 0x71, - 0x88, 0xa5, 0xea, 0x15, 0x29, 0xbe, 0x9c, 0xf9, 0xac, 0xd9, 0x33, 0x2c, 0x51, 0x50, 0xd3, 0xf1, - 0x78, 0x6e, 0x78, 0xc9, 0x5e, 0x21, 0x6e, 0xa6, 0x91, 0xcc, 0x99, 0xaf, 0xb3, 0x20, 0x4f, 0x0f, - 0xdb, 0xe2, 0xb6, 0x59, 0xf1, 0xee, 0x3a, 0x76, 0xb7, 0x93, 0x20, 0x52, 0x97, 0xc1, 0x1a, 0xfa, - 0x71, 0xc6, 0x20, 0xaa, 0x8d, 0xfc, 0xe6, 0x2b, 0xeb, 0x57, 0xb9, 0x9a, 0x7d, 0xc0, 0xa3, 0x90, - 0x52, 0x93, 0x36, 0x83, 0xe8, 0x3f, 0x6b, 0x3d, 0x48, 0x3d, 0xb4, 0x23, 0x4f, 0xb3, 0x30, 0x4f, - 0x0f, 0x9b, 0x7b, 0xe7, 0xeb, 0xff, 0x56, 0xec, 0xf6, 0x41, 0x39, 0xcf, 0x97, 0x09, 0xac, 0xd0, - 0xe7, 0xed, 0xde, 0xe0, 0xe1, 0xcf, 0x12, 0x36, 0xfa, 0x4b, 0x0f, 0x92, 0x07, 0x3e, 0xda, 0xd6, - 0x96, 0xa1, 0xa0, 0xce, 0x3f, 0x5c, 0xe5, 0xea, 0x72, 0x0b, 0xff, 0x1b, 0x73, 0xbe, 0x8b, 0x2d, - 0x13, 0x67, 0x63, 0x2f, 0x70, 0x0a, 0xea, 0xd8, 0x9d, 0xe2, 0xe5, 0x37, 0x00, 0x00, 0xff, 0xff, - 0x2f, 0x89, 0x57, 0x7f, 0xa3, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.proto index ce07aa14f54f..5f8538a0164b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.proto +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api/http.proto @@ -1,4 +1,4 @@ -// Copyright (c) 2015, Google Inc. +// Copyright 2016 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
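The new `go_package` option on `annotations.proto` points these protos at `google.golang.org/genproto/googleapis/api/annotations`, which is presumably why the vendored `google_api` generated files above are deleted: the Go types for `HttpRule` are expected to come from genproto instead. A minimal sketch using that package (the path template value is illustrative, not taken from this patch):

```go
package main

import (
	"fmt"

	annotations "google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Rough Go equivalent of an `option (google.api.http) = { get: "..." }`
	// annotation, built with the genproto-generated HttpRule types.
	rule := &annotations.HttpRule{
		Pattern: &annotations.HttpRule_Get{Get: "/v1/messages/{message_id}"},
	}
	fmt.Println(rule.GetGet()) // prints the bound path template
}
```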
@@ -16,38 +16,184 @@ syntax = "proto3"; package google.api; +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "HttpProto"; option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; -// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API -// methods. The mapping determines what portions of the request message are -// populated from the path, query parameters, or body of the HTTP request. The -// mapping is typically specified as an `google.api.http` annotation, see -// "google/api/annotations.proto" for details. +// Defines the HTTP configuration for a service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST APIs. The mapping determines what portions of the request +// message are populated from the path, query parameters, or body of +// the HTTP request. The mapping is typically specified as an +// `google.api.http` annotation, see "google/api/annotations.proto" +// for details. +// +// The mapping consists of a field specifying the path template and +// method kind. The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. 
Assume the following definition of the request message: +// +// +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. // -// The mapping consists of a mandatory field specifying a path template and an -// optional `body` field specifying what data is represented in the HTTP request -// body. The field name for the path indicates the HTTP method. Example: +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: // -// ``` -// package google.storage.v2; // -// import "google/api/annotations.proto"; +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } // -// service Storage { -// rpc CreateObject(CreateObjectRequest) returns (Object) { -// option (google.api.http) { -// post: "/v2/{bucket_name=buckets/*}/objects" -// body: "object" -// }; -// }; -// } -// ``` // -// Here `bucket_name` and `object` bind to fields of the request message -// `CreateObjectRequest`. +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // The rules for mapping HTTP path, query parameters, and body fields // to the request message are as follows: @@ -72,17 +218,33 @@ option java_package = "com.google.api"; // FieldPath = IDENT { "." IDENT } ; // Verb = ":" LITERAL ; // -// `*` matches a single path component, `**` zero or more path components, and -// `LITERAL` a constant. A `Variable` can match an entire path as specified -// again by a template; this nested template must not contain further variables. -// If no template is given with a variable, it matches a single path component. -// The notation `{var}` is henceforth equivalent to `{var=*}`. +// The syntax `*` matches a single path segment. It follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion. +// +// The syntax `**` matches zero or more path segments. It follows the semantics +// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved +// Expansion. NOTE: it must be the last segment in the path except the Verb. +// +// The syntax `LITERAL` matches literal text in the URL path. +// +// The syntax `Variable` matches the entire path as specified by its template; +// this nested template must not contain further variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. // // Use CustomHttpPattern to specify any HTTP method that is not included in the -// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for +// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for // a given URL path rule. The wild-card rule is useful for services that provide // content to Web (HTML) clients. message HttpRule { + // Selects methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method @@ -109,11 +271,13 @@ message HttpRule { // The name of the request field whose value is mapped to the HTTP body, or // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. string body = 7; - // Additional HTTP bindings for the selector. Nested bindings must not - // specify a selector and must not contain additional bindings. + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). 
repeated HttpRule additional_bindings = 11; } diff --git a/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE b/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000000..642f85a16a57 --- /dev/null +++ b/vendor/google.golang.org/grpc/.github/ISSUE_TEMPLATE @@ -0,0 +1,14 @@ +Please answer these questions before submitting your issue. + +### What version of gRPC are you using? + +### What version of Go are you using (`go version`)? + +### What operating system (Linux, Windows, …) and version? + +### What did you do? +If possible, provide a recipe for reproducing the error. + +### What did you expect to see? + +### What did you see instead? diff --git a/vendor/google.golang.org/grpc/.please-update b/vendor/google.golang.org/grpc/.please-update new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index b3577c7ae20b..22bf25004a3b 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -1,19 +1,20 @@ language: go go: - - 1.6.3 - - 1.7 - - 1.8 + - 1.7.x + - 1.8.x + - 1.9.x + +matrix: + include: + - go: 1.9.x + env: ARCH=386 go_import_path: google.golang.org/grpc before_install: - - go get github.com/golang/lint/golint - - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi script: - - '! gofmt -s -d -l . 2>&1 | read' - - '! goimports -l . | read' - - '! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"' - - '! go tool vet -all . 2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf" | grep -vF .pb.go:' # https://github.com/golang/protobuf/issues/214 + - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi - make test testrace diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000000..e491a9e7f783 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 36cd6f7581bb..a5c6e06e2550 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,46 +1,32 @@ # How to contribute -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. +We definitely welcome your patches and contributions to gRPC! -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... - -1. Open a pull request. +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://cla.developers.google.com/clas). -## Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. 
What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. + +- Exceptions to the rules can be made if there's a compelling reason for doing so. + diff --git a/vendor/google.golang.org/grpc/Documentation/gomock-example.md b/vendor/google.golang.org/grpc/Documentation/gomock-example.md index 9f7e023d6e72..54743e897ae1 100644 --- a/vendor/google.golang.org/grpc/Documentation/gomock-example.md +++ b/vendor/google.golang.org/grpc/Documentation/gomock-example.md @@ -1,10 +1,12 @@ # Mocking Service for gRPC -[Example code](https://github.com/grpc/grpc-go/tree/master/examples/helloworld/mock) +[Example code unary RPC](https://github.com/grpc/grpc-go/tree/master/examples/helloworld/mock_helloworld) + +[Example code streaming RPC](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/mock_routeguide) ## Why? -To test client-side logic without the overhead of connecting to a real server. Mocking enables users to write light-weight unit tests to check functionalities on client-side without invoking RPC calls to a server. +To test client-side logic without the overhead of connecting to a real server. Mocking enables users to write light-weight unit tests to check functionalities on client-side without invoking RPC calls to a server. ## Idea: Mock the client stub that connects to the server. 
@@ -61,18 +63,18 @@ Along with this the generated code has a method to create an instance of this st func NewGreeterClient(cc *grpc.ClientConn) GreeterClient ``` -The user code uses this function to create an instance of the struct greeterClient which then can be used to make rpc calls to the server. +The user code uses this function to create an instance of the struct greeterClient which then can be used to make rpc calls to the server. We will mock this interface GreeterClient and use an instance of that mock to make rpc calls. These calls instead of going to server will return pre-determined values. To create a mock we’ll use [mockgen](https://github.com/golang/mock#running-mockgen). -From the directory ``` examples/helloworld/mock/ ``` run ``` mockgen google.golang.org/grpc/examples/helloworld/helloworld GreeterClient > mock_helloworld/hw_mock.go ``` +From the directory ``` examples/helloworld/ ``` run ``` mockgen google.golang.org/grpc/examples/helloworld/helloworld GreeterClient > mock_helloworld/hw_mock.go ``` Notice that in the above command we specify GreeterClient as the interface to be mocked. The user test code can import the package generated by mockgen along with library package gomock to write unit tests around client-side logic. ```Go import "github.com/golang/mock/gomock" -import hwmock "google.golang.org/grpc/examples/helloworld/mock/mock_helloworld" +import hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" ``` An instance of the mocked interface can be created as: @@ -85,7 +87,7 @@ This mocked object can be programmed to expect calls to its methods and return p mockGreeterClient.EXPECT().SayHello( gomock.Any(), // expect any value for first parameter gomock.Any(), // expect any value for second parameter -).Return(&helloworld.HelloReply{Message: “Mocked RPC”}, nil) +).Return(&helloworld.HelloReply{Message: “Mocked RPC”}, nil) ``` gomock.Any() indicates that the parameter can have any value or type. We can indicate specific values for built-in types with gomock.Eq(). @@ -117,5 +119,64 @@ mockGreeterClient.EXPECT().SayHello( ).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) ``` +## Mock streaming RPCs: + +For our example we consider the case of bi-directional streaming RPCs. Concretely, we'll write a test for RouteChat function from the route guide example to demonstrate how to write mocks for streams. + +RouteChat is a bi-directional streaming RPC, which means calling RouteChat returns a stream that can __Send__ and __Recv__ messages to and from the server, respectively. We'll start by creating a mock of this stream interface returned by RouteChat and then we'll mock the client interface and set expectation on the method RouteChat to return our mocked stream. + +### Generating mocking code: +Like before we'll use [mockgen](https://github.com/golang/mock#running-mockgen). From the `examples/route_guide` directory run: `mockgen google.golang.org/grpc/examples/route_guide/routeguide RouteGuideClient,RouteGuide_RouteChatClient > mock_route_guide/rg_mock.go` + +Notice that we are mocking both client(`RouteGuideClient`) and stream(`RouteGuide_RouteChatClient`) interfaces here. + +This will create a file `rg_mock.go` under directory `mock_route_guide`. This file contins all the mocking code we need to write our test. 
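For reference, the snippets in this section can be assembled into one self-contained test; a rough sketch (assuming the `mock_routeguide` package generated above and the route guide's `RouteNote` message — the message contents and test wiring are illustrative):

```go
package mock_routeguide_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide"
	rgpb "google.golang.org/grpc/examples/route_guide/routeguide"
)

func TestRouteChat(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// The message we expect to travel over the mocked stream.
	msg := &rgpb.RouteNote{Message: "hello"}

	// Mock the stream returned by RouteChat and program Send/Recv.
	stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl)
	stream.EXPECT().Send(gomock.Any()).Return(nil)
	stream.EXPECT().Recv().Return(msg, nil)

	// Mock the client and have RouteChat hand back the mocked stream.
	rgclient := rgmock.NewMockRouteGuideClient(ctrl)
	rgclient.EXPECT().RouteChat(gomock.Any()).Return(stream, nil)

	// Exercise client-side logic against the mocks only; no server involved.
	s, err := rgclient.RouteChat(context.Background())
	if err != nil {
		t.Fatalf("RouteChat failed: %v", err)
	}
	if err := s.Send(msg); err != nil {
		t.Fatalf("Send failed: %v", err)
	}
	got, err := s.Recv()
	if err != nil {
		t.Fatalf("Recv failed: %v", err)
	}
	if got.Message != msg.Message {
		t.Errorf("received %q, want %q", got.Message, msg.Message)
	}
}
```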
+ +In our test code, like before, we import the this mocking code along with the generated code + +```go +import ( + rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" + rgpb "google.golang.org/grpc/examples/route_guide/routeguide" +) +``` + +Now conside a test that takes the RouteGuide client object as a parameter, makes a RouteChat rpc call and sends a message on the resulting stream. Furthermore, this test expects to see the same message to be received on the stream. + +```go +var msg = ... + +// Creates a RouteChat call and sends msg on it. +// Checks if the received message was equal to msg. +func testRouteChat(client rgb.RouteChatClient) error{ + ... +} +``` + +We can inject our mock in here by simply passing it as an argument to the method. +Creating mock for stream interface: +```go + stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) +} +``` + +Setting Expectations: + +```go + stream.EXPECT().Send(gomock.Any()).Return(nil) + stream.EXPECT().Recv().Return(msg, nil) +``` + +Creating mock for client interface: + +```go + rgclient := rgmock.NewMockRouteGuideClient(ctrl) +``` + +Setting Expectations: + +```go + rgclient.EXPECT().RouteChat(gomock.Any()).Return(stream, nil) +``` diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md index 248d272dce59..1b6b14e9de29 100644 --- a/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md +++ b/vendor/google.golang.org/grpc/Documentation/grpc-auth-support.md @@ -1,6 +1,6 @@ # Authentication -As outlined in the [gRPC authentication guide](http://www.grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. +As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. # Enabling TLS on a gRPC client diff --git a/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md index c26435269e65..c9a88c689395 100644 --- a/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md +++ b/vendor/google.golang.org/grpc/Documentation/grpc-metadata.md @@ -7,12 +7,12 @@ This doc shows how to send and receive metadata in gRPC-go. Four kinds of service method: -- [Unary RPC](http://www.grpc.io/docs/guides/concepts.html#unary-rpc) -- [Server streaming RPC](http://www.grpc.io/docs/guides/concepts.html#server-streaming-rpc) -- [Client streaming RPC](http://www.grpc.io/docs/guides/concepts.html#client-streaming-rpc) -- [Bidirectional streaming RPC](http://www.grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc) +- [Unary RPC](https://grpc.io/docs/guides/concepts.html#unary-rpc) +- [Server streaming RPC](https://grpc.io/docs/guides/concepts.html#server-streaming-rpc) +- [Client streaming RPC](https://grpc.io/docs/guides/concepts.html#client-streaming-rpc) +- [Bidirectional streaming RPC](https://grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc) -And concept of [metadata](http://www.grpc.io/docs/guides/concepts.html#metadata). 
+And concept of [metadata](https://grpc.io/docs/guides/concepts.html#metadata). ## Constructing metadata @@ -82,7 +82,7 @@ func (s *server) SomeRPC(ctx context.Context, in *pb.SomeRequest) (*pb.SomeRespo ### Sending metadata -To send metadata to server, the client can wrap the metadata into a context using `NewContext`, and make the RPC with this context: +To send metadata to server, the client can wrap the metadata into a context using `NewOutgoingContext`, and make the RPC with this context: ```go md := metadata.Pairs("key", "val") diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE index f4988b450799..d64569567334 100644 --- a/vendor/google.golang.org/grpc/LICENSE +++ b/vendor/google.golang.org/grpc/LICENSE @@ -1,28 +1,202 @@ -Copyright 2014, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 03bb01f0b35b..39606b564a6d 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -20,24 +20,17 @@ proto: echo "error: protoc not installed" >&2; \ exit 1; \ fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done + go generate google.golang.org/grpc/... test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... + go test -cpu 1,4 google.golang.org/grpc/... testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... + go test -race -cpu 1,4 google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... 
-coverage: testdeps - ./coverage.sh --coveralls - .PHONY: \ all \ deps \ diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 69b47959faba..000000000000 --- a/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the gRPC project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of gRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of gRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of gRPC or any code incorporated within this -implementation of gRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of gRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ae0236f92f3a..622a5dc3e85d 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -2,7 +2,7 @@ [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ @@ -10,13 +10,13 @@ Installation To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` -$ go get google.golang.org/grpc +$ go get -u google.golang.org/grpc ``` Prerequisites ------------- -This requires Go 1.6 or later. +This requires Go 1.7 or later. Constraints ----------- @@ -26,9 +26,13 @@ Documentation ------------- See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + Status ------ -GA +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). 
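As a quick sanity check of the install and Go 1.7+ notes in the README hunk above, a minimal dial sketch (not part of the upstream README; the target address is a placeholder and no running server is assumed):

```go
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Give the dial five seconds to reach the placeholder address; WithBlock
	// makes DialContext wait for a connection (and fail cleanly) instead of
	// returning immediately.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, "localhost:50051", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	log.Println("connected")
}
```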
FAQ --- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index c99024ee302e..090fbe87c522 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package grpc import ( diff --git a/vendor/google.golang.org/grpc/backoff_test.go b/vendor/google.golang.org/grpc/backoff_test.go index bfca7b176509..37e8e3f62bf9 100644 --- a/vendor/google.golang.org/grpc/backoff_test.go +++ b/vendor/google.golang.org/grpc/backoff_test.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package grpc import "testing" diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 9d943fbadae1..ab65049ddc17 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -35,6 +20,7 @@ package grpc import ( "fmt" + "net" "sync" "golang.org/x/net/context" @@ -60,6 +46,10 @@ type BalancerConfig struct { // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. @@ -167,7 +157,7 @@ type roundRobin struct { func (rr *roundRobin) watchAddrUpdates() error { updates, err := rr.w.Next() if err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) return err } rr.mu.Lock() @@ -183,7 +173,7 @@ func (rr *roundRobin) watchAddrUpdates() error { for _, v := range rr.addrs { if addr == v.addr { exist = true - grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) break } } @@ -200,7 +190,7 @@ func (rr *roundRobin) watchAddrUpdates() error { } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorln("Unknown update.Op ", update.Op) } } // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. @@ -211,6 +201,10 @@ func (rr *roundRobin) watchAddrUpdates() error { if rr.done { return ErrClientConnClosing } + select { + case <-rr.addrCh: + default: + } rr.addrCh <- open return nil } @@ -233,7 +227,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error { return err } rr.w = w - rr.addrCh = make(chan []Address) + rr.addrCh = make(chan []Address, 1) go func() { for { if err := rr.watchAddrUpdates(); err != nil { @@ -385,6 +379,9 @@ func (rr *roundRobin) Notify() <-chan []Address { func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } rr.done = true if rr.w != nil { rr.w.Close() @@ -398,3 +395,14 @@ func (rr *roundRobin) Close() error { } return nil } + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). 
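One easy-to-miss change in the hunk above is that rr.addrCh becomes a 1-buffered channel and watchAddrUpdates drains any stale notification before sending a fresh one, so a slow reader only ever observes the latest address list. A standalone sketch of that coalescing pattern, with invented names and a plain []string payload (the pickFirst wrapper documented above continues immediately after this sketch):

```go
package main

import "fmt"

// notify coalesces updates onto a 1-buffered channel: a pending, unread update
// is discarded and replaced by the latest one, so a single writer never blocks
// and the reader always sees the most recent snapshot.
func notify(ch chan []string, latest []string) {
	select {
	case <-ch: // discard a stale update that nobody has read yet
	default:
	}
	ch <- latest
}

func main() {
	ch := make(chan []string, 1)
	notify(ch, []string{"10.0.0.1:80"})
	notify(ch, []string{"10.0.0.1:80", "10.0.0.2:80"}) // replaces the pending update
	fmt.Println(<-ch)                                  // [10.0.0.1:80 10.0.0.2:80]
}
```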
+type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000000..84e10b630e75 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + // defaultBuilder is the default balancer to use. + defaultBuilder Builder // TODO(bar) install pickfirst as default. +) + +// Register registers the balancer builder to the balancer map. +// b.Name will be used as the name registered with this builder. +func Register(b Builder) { + m[b.Name()] = b +} + +// Get returns the resolver builder registered with the given name. +// If no builder is register with the name, the default pickfirst will +// be used. +func Get(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return defaultBuilder +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct{} + +// ClientConn represents a gRPC ClientConn. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. 
+ NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // Target returns the dial target for this ClientConn. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// PickOptions contains additional information for the Pick operation. +type PickOptions struct{} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This function is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // this call until a new picker is updated and will call pick on the new picker.
+ // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000000..99e71cd390e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,241 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// newBuilder creates a new roundrobin balancer builder. 
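A minimal sketch of how an application might select a balancer through the registry defined above; it assumes only what this patch introduces (balancer.Get, grpc.WithBalancerBuilder, and the roundrobin package whose newBuilder function, documented just above, continues immediately after this sketch and registers itself under the name "roundrobin"); the target address is a placeholder:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	// The blank import runs the package init, which registers the
	// "roundrobin" builder with the balancer registry.
	_ "google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	// Look up the registered builder and hand it to Dial. A real client would
	// also configure a resolver so the balancer receives addresses to balance over.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerBuilder(balancer.Get("roundrobin")),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```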
+func newBuilder() balancer.Builder { + return &rrBuilder{} +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrBuilder struct{} + +func (*rrBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &rrBalancer{ + cc: cc, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: newPicker([]balancer.SubConn{}, nil), + } +} + +func (*rrBuilder) Name() string { + return "roundrobin" +} + +type rrBalancer struct { + cc balancer.ClientConn + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker *picker +} + +func (b *rrBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("roundrobin.rrBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("roundrobin.rrBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("roundrobin.rrBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - or does round robin selection of all READY SubConns otherwise. +func (b *rrBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = newPicker(nil, balancer.ErrTransientFailure) + return + } + var readySCs []balancer.SubConn + for sc, st := range b.scStates { + if st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + b.picker = newPicker(readySCs, nil) +} + +func (b *rrBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("roundrobin.rrBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("roundrobin.rrBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. 
+ delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) + return +} + +// Close is a nop because roundrobin balancer doesn't have any internal state to clean +// up, and it doesn't need to call RemoveSubConn for the SubConns. +func (b *rrBalancer) Close() { +} + +type picker struct { + // If err is not nil, Pick always returns this err. It's immutable after + // picker is created. + err error + + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Pick() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func newPicker(scs []balancer.SubConn, err error) *picker { + grpclog.Infof("roundrobinPicker: newPicker called with scs: %v, %v", scs, err) + if err != nil { + return &picker{err: err} + } + return &picker{ + subConns: scs, + } +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate.
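+ // Evaluation order below: a single Ready SubConn is enough to report Ready;
+ // otherwise at least one Connecting SubConn keeps the aggregate at Connecting;
+ // only when neither exists does the aggregate fall back to TransientFailure.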
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go new file mode 100644 index 000000000000..505ddaf9d970 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go @@ -0,0 +1,470 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package roundrobin + +import ( + "fmt" + "net" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" +) + +type testServer struct { + testpb.TestServiceServer +} + +func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { + return nil +} + +type test struct { + servers []*grpc.Server + addresses []string +} + +func (t *test) cleanup() { + for _, s := range t.servers { + s.Stop() + } +} + +func startTestServers(count int) (_ *test, err error) { + t := &test{} + + defer func() { + if err != nil { + for _, s := range t.servers { + s.Stop() + } + } + }() + for i := 0; i < count; i++ { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("Failed to listen %v", err) + } + + s := grpc.NewServer() + testpb.RegisterTestServiceServer(s, &testServer{}) + t.servers = append(t.servers, s) + t.addresses = append(t.addresses, lis.Addr().String()) + + go func(s *grpc.Server, l net.Listener) { + s.Serve(l) + }(s, lis) + } + + return t, nil +} + +func TestOneBackend(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. 
+ if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } +} + +func TestBackendsRoundRobin(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 5 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. + for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } +} + +func TestAddressesRemoved(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. 
+ if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{}) + for i := 0; i < 1000; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) == codes.DeadlineExceeded { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("No RPC failed after removing all addresses, want RPC to fail with DeadlineExceeded") +} + +func TestCloseWithPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + testc := testpb.NewTestServiceClient(cc) + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until cc is closed. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) == codes.DeadlineExceeded { + t.Errorf("RPC failed because of deadline after cc is closed; want error the client connection is closing") + } + cancel() + }() + } + cc.Close() + wg.Wait() +} + +func TestNewAddressWhileBlocking(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + test, err := startTestServers(1) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + // The second RPC should succeed. + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, nil", err) + } + + r.NewAddress([]resolver.Address{}) + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. 
+ testc.EmptyCall(context.Background(), &testpb.Empty{}) + }() + } + time.Sleep(50 * time.Millisecond) + r.NewAddress([]resolver.Address{{Addr: test.addresses[0]}}) + wg.Wait() +} + +func TestOneServerDown(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 3 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. + for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } + + // Stop one server, RPCs should roundrobin among the remaining servers. + backendCount-- + test.servers[backendCount].Stop() + // Loop until see server[backendCount-1] twice without seeing server[backendCount]. + var targetSeen int + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + switch p.Addr.String() { + case test.addresses[backendCount-1]: + targetSeen++ + case test.addresses[backendCount]: + // Reset targetSeen if peer is server[backendCount]. + targetSeen = 0 + } + // Break to make sure the last picked address is server[-1], so the following for loop won't be flaky. 
+ if targetSeen >= 2 { + break + } + } + if targetSeen != 2 { + t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]") + } + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } +} + +func TestAllServersDown(t *testing.T) { + defer leakcheck.Check(t) + r, cleanup := manual.GenerateAndRegisterManualResolver() + defer cleanup() + + backendCount := 3 + test, err := startTestServers(backendCount) + if err != nil { + t.Fatalf("failed to start servers: %v", err) + } + defer test.cleanup() + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerBuilder(newBuilder())) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + testc := testpb.NewTestServiceClient(cc) + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var resolvedAddrs []resolver.Address + for i := 0; i < backendCount; i++ { + resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) + } + + r.NewAddress(resolvedAddrs) + var p peer.Peer + // Make sure connections to all servers are up. + for si := 0; si < backendCount; si++ { + var connected bool + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() == test.addresses[si] { + connected = true + break + } + time.Sleep(time.Millisecond) + } + if !connected { + t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) + } + } + + for i := 0; i < 3*backendCount; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + if p.Addr.String() != test.addresses[i%backendCount] { + t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) + } + } + + // All servers are stopped, failfast RPC should fail with unavailable. + for i := 0; i < backendCount; i++ { + test.servers[i].Stop() + } + time.Sleep(100 * time.Millisecond) + for i := 0; i < 1000; i++ { + if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) == codes.Unavailable { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("Failfast RPCs didn't fail with Unavailable after all servers are stopped") +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 000000000000..f5dbc4ba201f --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateUpdate. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that receives a scStateUpdate in the buffer. +// +// Upon receiving, the caller should call load to send another +// scStateUpdate onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher calls balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created.
tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + grpclog.Infof("ccBalancerWrapper: removing subconn") + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p) + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) + acbw.mu.Lock() + defer acbw.mu.Unlock() + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect(false) + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect(false) +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_test.go b/vendor/google.golang.org/grpc/balancer_test.go index 48f8b27d4629..29dbe0a67671 100644 --- a/vendor/google.golang.org/grpc/balancer_test.go +++ b/vendor/google.golang.org/grpc/balancer_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,13 +21,16 @@ package grpc import ( "fmt" "math" + "strconv" "sync" "testing" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" + _ "google.golang.org/grpc/grpclog/glogger" "google.golang.org/grpc/naming" + "google.golang.org/grpc/test/leakcheck" ) type testWatcher struct { @@ -70,6 +58,7 @@ func (w *testWatcher) Next() (updates []*naming.Update, err error) { } func (w *testWatcher) Close() { + close(w.side) } // Inject naming resolution updates to the testWatcher. @@ -103,7 +92,7 @@ func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { return r.w, nil } -func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver) { +func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver, func()) { var servers []*server for i := 0; i < numServers; i++ { s := newTestServer() @@ -112,20 +101,27 @@ func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, * s.wait(t, 2*time.Second) } // Point to server[0] - addr := "127.0.0.1:" + servers[0].port + addr := "localhost:" + servers[0].port return servers, &testNameResolver{ - addr: addr, - } + addr: addr, + }, func() { + for i := 0; i < numServers; i++ { + servers[i].stop() + } + } } func TestNameDiscovery(t *testing.T) { + defer leakcheck.Check(t) // Start 2 servers on 2 ports. 
numServers := 2 - servers, r := startServers(t, numServers, math.MaxUint32) + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() req := "port" var reply string if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { @@ -135,11 +131,11 @@ func TestNameDiscovery(t *testing.T) { var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Delete, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, }) updates = append(updates, &naming.Update{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[1].port, + Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) // Loop until the rpcs in flight talks to servers[1]. @@ -149,18 +145,17 @@ func TestNameDiscovery(t *testing.T) { } time.Sleep(10 * time.Millisecond) } - cc.Close() - for i := 0; i < numServers; i++ { - servers[i].stop() - } } func TestEmptyAddrs(t *testing.T) { - servers, r := startServers(t, 1, math.MaxUint32) + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() var reply string if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) @@ -169,33 +164,36 @@ func TestEmptyAddrs(t *testing.T) { // available after that. u := &naming.Update{ Op: naming.Delete, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, } r.w.inject([]*naming.Update{u}) // Loop until the above updates apply. for { time.Sleep(10 * time.Millisecond) - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil { + cancel() break } + cancel() } - cc.Close() - servers[0].stop() } func TestRoundRobin(t *testing.T) { + defer leakcheck.Check(t) // Start 3 servers on 3 ports. numServers := 3 - servers, r := startServers(t, numServers, math.MaxUint32) + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() // Add servers[1] to the service discovery. u := &naming.Update{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[1].port, + Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) req := "port" @@ -210,7 +208,7 @@ func TestRoundRobin(t *testing.T) { // Add server2[2] to the service discovery. u = &naming.Update{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[2].port, + Addr: "localhost:" + servers[2].port, } r.w.inject([]*naming.Update{u}) // Loop until both servers[2] are up. 
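For contrast with the builder-based API added elsewhere in this patch, the tests in this file exercise the original naming.Resolver round-robin path. A compact, self-contained sketch of that older pattern, with invented staticResolver/staticWatcher names and placeholder addresses (the target string is arbitrary, as in the tests above):

```go
package main

import (
	"errors"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

// staticResolver hands the balancer a fixed address list once and then blocks
// until it is closed. It is only a sketch of the naming.Resolver/naming.Watcher
// pair that grpc.RoundRobin consumes.
type staticResolver struct{ addrs []string }

type staticWatcher struct {
	updates chan []*naming.Update
	done    chan struct{}
}

func (r *staticResolver) Resolve(target string) (naming.Watcher, error) {
	w := &staticWatcher{updates: make(chan []*naming.Update, 1), done: make(chan struct{})}
	var ups []*naming.Update
	for _, a := range r.addrs {
		ups = append(ups, &naming.Update{Op: naming.Add, Addr: a})
	}
	w.updates <- ups // delivered on the first Next call
	return w, nil
}

// Next returns the initial address set, then blocks until Close is called.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	select {
	case u := <-w.updates:
		return u, nil
	case <-w.done:
		return nil, errors.New("watcher closed")
	}
}

func (w *staticWatcher) Close() { close(w.done) }

func main() {
	r := &staticResolver{addrs: []string{"localhost:50051", "localhost:50052"}}
	conn, err := grpc.Dial("foo.bar.com", grpc.WithInsecure(), grpc.WithBalancer(grpc.RoundRobin(r)))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```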
@@ -226,18 +224,17 @@ func TestRoundRobin(t *testing.T) { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port) } } - cc.Close() - for i := 0; i < numServers; i++ { - servers[i].stop() - } } func TestCloseWithPendingRPC(t *testing.T) { - servers, r := startServers(t, 1, math.MaxUint32) + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() var reply string if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) @@ -245,16 +242,18 @@ func TestCloseWithPendingRPC(t *testing.T) { // Remove the server. updates := []*naming.Update{{ Op: naming.Delete, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) // Loop until the above update applies. for { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() break } time.Sleep(10 * time.Millisecond) + cancel() } // Issue 2 RPCs which should be completed with error status once cc is closed. var wg sync.WaitGroup @@ -277,27 +276,31 @@ func TestCloseWithPendingRPC(t *testing.T) { time.Sleep(5 * time.Millisecond) cc.Close() wg.Wait() - servers[0].stop() } func TestGetOnWaitChannel(t *testing.T) { - servers, r := startServers(t, 1, math.MaxUint32) + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() // Remove all servers so that all upcoming RPCs will block on waitCh. updates := []*naming.Update{{ Op: naming.Delete, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) for { var reply string - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() break } + cancel() time.Sleep(10 * time.Millisecond) } var wg sync.WaitGroup @@ -312,28 +315,29 @@ func TestGetOnWaitChannel(t *testing.T) { // Add a connected server to get the above RPC through. updates = []*naming.Update{{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) // Wait until the above RPC succeeds. wg.Wait() - cc.Close() - servers[0].stop() } func TestOneServerDown(t *testing.T) { + defer leakcheck.Check(t) // Start 2 servers. numServers := 2 - servers, r := startServers(t, numServers, math.MaxUint32) + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() // Add servers[1] to the service discovery. 
var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[1].port, + Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) req := "port" @@ -369,25 +373,24 @@ func TestOneServerDown(t *testing.T) { }() } wg.Wait() - cc.Close() - for i := 0; i < numServers; i++ { - servers[i].stop() - } } func TestOneAddressRemoval(t *testing.T) { + defer leakcheck.Check(t) // Start 2 servers. numServers := 2 - servers, r := startServers(t, numServers, math.MaxUint32) + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } + defer cc.Close() // Add servers[1] to the service discovery. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Add, - Addr: "127.0.0.1:" + servers[1].port, + Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) req := "port" @@ -410,7 +413,7 @@ func TestOneAddressRemoval(t *testing.T) { var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Delete, - Addr: "127.0.0.1:" + servers[0].port, + Addr: "localhost:" + servers[0].port, }) r.w.inject(updates) wg.Done() @@ -431,8 +434,365 @@ func TestOneAddressRemoval(t *testing.T) { }() } wg.Wait() +} + +func checkServerUp(t *testing.T, currentServer *server) { + req := "port" + port := currentServer.port + cc, err := Dial("localhost:"+port, WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == port { + break + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstEmptyAddrs(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) + } + // Inject name resolution change to remove the server so that there is no address + // available after that. + u := &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until the above updates apply. 
+ for { + time.Sleep(10 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc); err != nil { + cancel() + break + } + cancel() + } +} + +func TestPickFirstCloseWithPendingRPC(t *testing.T) { + defer leakcheck.Check(t) + servers, r, cleanup := startServers(t, 1, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) + } + // Remove the server. + updates := []*naming.Update{{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }} + r.w.inject(updates) + // Loop until the above update applies. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if err := Invoke(ctx, "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); Code(err) == codes.DeadlineExceeded { + cancel() + break + } + time.Sleep(10 * time.Millisecond) + cancel() + } + // Issue 2 RPCs which should be completed with error status once cc is closed. + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + var reply string + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + go func() { + defer wg.Done() + var reply string + time.Sleep(5 * time.Millisecond) + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err == nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + }() + time.Sleep(5 * time.Millisecond) cc.Close() - for i := 0; i < numServers; i++ { - servers[i].stop() + wg.Wait() +} + +func TestPickFirstOrderAllServerUp(t *testing.T) { + defer leakcheck.Check(t) + // Start 3 servers on 3 ports. + numServers := 3 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] and [2] to the service discovery. 
+ u := &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + + // Loop until all 3 servers are up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + checkServerUp(t, servers[2]) + + // Check the incoming RPCs served in server[0] + req := "port" + var reply string + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[0] in the balancer, the incoming RPCs served in server[1] + // For test addrconn, close server[0] instead + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + // Loop until it changes to server[1] + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) } + + // Add server[0] back to the balancer, the incoming RPCs served in server[1] + // Add is append operation, the order of Notify now is {server[1].port server[2].port server[0].port} + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[0].port, + } + r.w.inject([]*naming.Update{u}) + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[1] in the balancer, the incoming RPCs served in server[2] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[2].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[2] in the balancer, the incoming RPCs served in server[0] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstOrderOneServerDown(t *testing.T) { + defer leakcheck.Check(t) + // Start 3 servers on 3 ports. 
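+	// Stopping and restarting server[0] below verifies that pickfirst sticks
+	// with server[1], the next address in Notify order, once it has failed over.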
+ numServers := 3 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] and [2] to the service discovery. + u := &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + + u = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[2].port, + } + r.w.inject([]*naming.Update{u}) + + // Loop until all 3 servers are up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + checkServerUp(t, servers[2]) + + // Check the incoming RPCs served in server[0] + req := "port" + var reply string + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // server[0] down, incoming RPCs served in server[1], but the order of Notify still remains + // {server[0] server[1] server[2]} + servers[0].stop() + // Loop until it changes to server[1] + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(10 * time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // up the server[0] back, the incoming RPCs served in server[1] + p, _ := strconv.Atoi(servers[0].port) + servers[0] = newTestServer() + go servers[0].start(t, p, math.MaxUint32) + defer servers[0].stop() + servers[0].wait(t, 2*time.Second) + checkServerUp(t, servers[0]) + + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Delete server[1] in the balancer, the incoming RPCs served in server[0] + u = &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[1].port, + } + r.w.inject([]*naming.Update{u}) + for { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(1 * time.Second) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} + +func TestPickFirstOneAddressRemoval(t *testing.T) { + defer leakcheck.Check(t) + // Start 2 servers. + numServers := 2 + servers, r, cleanup := startServers(t, numServers, math.MaxUint32) + defer cleanup() + cc, err := Dial("localhost:"+servers[0].port, WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("Failed to create ClientConn: %v", err) + } + defer cc.Close() + // Add servers[1] to the service discovery. 
+ var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + }) + r.w.inject(updates) + + // Create a new cc to Loop until servers[1] is up + checkServerUp(t, servers[0]) + checkServerUp(t, servers[1]) + + var wg sync.WaitGroup + numRPC := 100 + sleepDuration := 10 * time.Millisecond + wg.Add(1) + go func() { + time.Sleep(sleepDuration) + // After sleepDuration, delete server[0]. + var updates []*naming.Update + updates = append(updates, &naming.Update{ + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }) + r.w.inject(updates) + wg.Done() + }() + + // All non-failfast RPCs should not fail because there's at least one connection available. + for i := 0; i < numRPC; i++ { + wg.Add(1) + go func() { + var reply string + time.Sleep(sleepDuration) + // After sleepDuration, invoke RPC. + // server[0] is removed around the same time to make it racy between balancer and gRPC internals. + if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil { + t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) + } + wg.Done() + }() + } + wg.Wait() } diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 000000000000..9d0616080a1b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,367 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(cc.Target(), BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + + // To aggregate the connectivity state. 
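+	// csEvltr turns per-SubConn transitions into a single aggregated state,
+	// which is what gets reported back to gRPC via UpdateBalancerState.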
+ csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.cc.Target(), + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.cc.Target()}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + oldSC.UpdateAddresses(newAddrs) + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. 
+ } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } + return +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. + return +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() + return +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. 
+} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmain/main.go b/vendor/google.golang.org/grpc/benchmark/benchmain/main.go new file mode 100644 index 000000000000..0cc1f25e630b --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/benchmain/main.go @@ -0,0 +1,499 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package main provides benchmark with setting flags. + +An example to run some benchmarks with profiling enabled: + +go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ + -compression=on -maxConcurrentCalls=1 -trace=off \ + -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ + -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result + +As a suggestion, when creating a branch, you can run this benchmark and save the result +file "-resultFile=basePerf", and later when you at the middle of the work or finish the +work, you can get the benchmark result and compare it with the base anytime. + +Assume there are two result files names as "basePerf" and "curPerf" created by adding +-resultFile=basePerf and -resultFile=curPerf. 
+ To format the curPerf, run: + go run benchmark/benchresult/main.go curPerf + To observe how the performance changes based on a base result, run: + go run benchmark/benchresult/main.go basePerf curPerf +*/ +package main + +import ( + "encoding/gob" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "reflect" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + bm "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/latency" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" +) + +const ( + modeOn = "on" + modeOff = "off" + modeBoth = "both" +) + +var allCompressionModes = []string{modeOn, modeOff, modeBoth} +var allTraceModes = []string{modeOn, modeOff, modeBoth} + +const ( + workloadsUnary = "unary" + workloadsStreaming = "streaming" + workloadsAll = "all" +) + +var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsAll} + +var ( + runMode = []bool{true, true} // {runUnary, runStream} + // When set the latency to 0 (no delay), the result is slower than the real result with no delay + // because latency simulation section has extra operations + ltc = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. + kbps = []int{0, 10240} // if non-positive, infinite + mtu = []int{0} // if non-positive, infinite + maxConcurrentCalls = []int{1, 8, 64, 512} + reqSizeBytes = []int{1, 1024, 1024 * 1024} + respSizeBytes = []int{1, 1024, 1024 * 1024} + enableTrace []bool + benchtime time.Duration + memProfile, cpuProfile string + memProfileRate int + enableCompressor []bool + networkMode string + benchmarkResultFile string + networks = map[string]latency.Network{ + "Local": latency.Local, + "LAN": latency.LAN, + "WAN": latency.WAN, + "Longhaul": latency.Longhaul, + } +) + +func unaryBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + caller, close := makeFuncUnary(benchFeatures) + defer close() + runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) +} + +func streamBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + caller, close := makeFuncStream(benchFeatures) + defer close() + runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) +} + +func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) { + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + opts := []grpc.DialOption{} + sopts := []grpc.ServerOption{} + if benchFeatures.EnableCompressor { + sopts = append(sopts, + grpc.RPCCompressor(nopCompressor{}), + grpc.RPCDecompressor(nopDecompressor{}), + ) + opts = append(opts, + grpc.WithCompressor(nopCompressor{}), + grpc.WithDecompressor(nopDecompressor{}), + ) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + })) + opts = append(opts, grpc.WithInsecure()) + + target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) + conn := bm.NewClientConn(target, opts...) 
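+	// The returned caller issues one unary RPC per invocation; the returned
+	// cleanup func closes the client connection and stops the server.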
+ tc := testpb.NewBenchmarkServiceClient(conn) + return func(int) { + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + }, func() { + conn.Close() + stopper() + } +} + +func makeFuncStream(benchFeatures stats.Features) (func(int), func()) { + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + opts := []grpc.DialOption{} + sopts := []grpc.ServerOption{} + if benchFeatures.EnableCompressor { + sopts = append(sopts, + grpc.RPCCompressor(grpc.NewGZIPCompressor()), + grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), + ) + opts = append(opts, + grpc.WithCompressor(grpc.NewGZIPCompressor()), + grpc.WithDecompressor(grpc.NewGZIPDecompressor()), + ) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + })) + opts = append(opts, grpc.WithInsecure()) + + target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) + conn := bm.NewClientConn(target, opts...) + tc := testpb.NewBenchmarkServiceClient(conn) + streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls) + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + streams[i] = stream + } + return func(pos int) { + streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + }, func() { + conn.Close() + stopper() + } +} + +func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { + if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { + if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } +} + +func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { + // Warm up connection. + for i := 0; i < 10; i++ { + caller(0) + } + // Run benchmark. + startTimer() + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + bmEnd := time.Now().Add(benchtime) + var count int32 + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + go func(pos int) { + for { + t := time.Now() + if t.After(bmEnd) { + break + } + start := time.Now() + caller(pos) + elapse := time.Since(start) + atomic.AddInt32(&count, 1) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }(i) + } + wg.Wait() + stopTimer(count) +} + +// Initiate main function to get settings of features. 
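+// init registers the benchmark flags and replaces the default feature slices
+// (latency, kbps, mtu, concurrency, request/response sizes) with any
+// comma-separated values supplied on the command line; main() then runs
+// every combination of the resulting feature values.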
+func init() { + var ( + workloads, traceMode, compressorMode, readLatency string + readKbps, readMtu, readMaxConcurrentCalls intSliceType + readReqSizeBytes, readRespSizeBytes intSliceType + ) + flag.StringVar(&workloads, "workloads", workloadsAll, + fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", "))) + flag.StringVar(&traceMode, "trace", modeOff, + fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", "))) + flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list") + flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark") + flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list") + flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list") + flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks") + flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list") + flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list") + flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.") + flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+ + "memProfile should be set before setting profile rate. To include every allocated block in the profile, "+ + "set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.") + flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided") + flag.StringVar(&compressorMode, "compression", modeOff, + fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", "))) + flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file") + flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul") + flag.Parse() + if flag.NArg() != 0 { + log.Fatal("Error: unparsed arguments: ", flag.Args()) + } + switch workloads { + case workloadsUnary: + runMode[0] = true + runMode[1] = false + case workloadsStreaming: + runMode[0] = false + runMode[1] = true + case workloadsAll: + runMode[0] = true + runMode[1] = true + default: + log.Fatalf("Unknown workloads setting: %v (want one of: %v)", + workloads, strings.Join(allWorkloads, ", ")) + } + enableCompressor = setMode(compressorMode) + enableTrace = setMode(traceMode) + // Time input formats as (time + unit). + readTimeFromInput(<c, readLatency) + readIntFromIntSlice(&kbps, readKbps) + readIntFromIntSlice(&mtu, readMtu) + readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls) + readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes) + readIntFromIntSlice(&respSizeBytes, readRespSizeBytes) + // Re-write latency, kpbs and mtu if network mode is set. 
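+	// A recognized -networkMode value (Local, LAN, WAN or Longhaul) takes
+	// precedence over any explicit -latency, -kbps and -mtu settings.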
+ if network, ok := networks[networkMode]; ok { + ltc = []time.Duration{network.Latency} + kbps = []int{network.Kbps} + mtu = []int{network.MTU} + } +} + +func setMode(name string) []bool { + switch name { + case modeOn: + return []bool{true} + case modeOff: + return []bool{false} + case modeBoth: + return []bool{false, true} + default: + log.Fatalf("Unknown %s setting: %v (want one of: %v)", + name, name, strings.Join(allCompressionModes, ", ")) + return []bool{} + } +} + +type intSliceType []int + +func (intSlice *intSliceType) String() string { + return fmt.Sprintf("%v", *intSlice) +} + +func (intSlice *intSliceType) Set(value string) error { + if len(*intSlice) > 0 { + return errors.New("interval flag already set") + } + for _, num := range strings.Split(value, ",") { + next, err := strconv.Atoi(num) + if err != nil { + return err + } + *intSlice = append(*intSlice, next) + } + return nil +} + +func readIntFromIntSlice(values *[]int, replace intSliceType) { + // If not set replace in the flag, just return to run the default settings. + if len(replace) == 0 { + return + } + *values = replace +} + +func readTimeFromInput(values *[]time.Duration, replace string) { + if strings.Compare(replace, "") != 0 { + *values = []time.Duration{} + for _, ltc := range strings.Split(replace, ",") { + duration, err := time.ParseDuration(ltc) + if err != nil { + log.Fatal(err.Error()) + } + *values = append(*values, duration) + } + } +} + +func main() { + before() + featuresPos := make([]int, 8) + // 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize + featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu), + len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor)} + initalPos := make([]int, len(featuresPos)) + s := stats.NewStats(10) + s.SortLatency() + var memStats runtime.MemStats + var results testing.BenchmarkResult + var startAllocs, startBytes uint64 + var startTime time.Time + start := true + var startTimer = func() { + runtime.ReadMemStats(&memStats) + startAllocs = memStats.Mallocs + startBytes = memStats.TotalAlloc + startTime = time.Now() + } + var stopTimer = func(count int32) { + runtime.ReadMemStats(&memStats) + results = testing.BenchmarkResult{N: int(count), T: time.Now().Sub(startTime), + Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes} + } + sharedPos := make([]bool, len(featuresPos)) + for i := 0; i < len(featuresPos); i++ { + if featuresNum[i] <= 1 { + sharedPos[i] = true + } + } + + // Run benchmarks + resultSlice := []stats.BenchResults{} + for !reflect.DeepEqual(featuresPos, initalPos) || start { + start = false + benchFeature := stats.Features{ + NetworkMode: networkMode, + EnableTrace: enableTrace[featuresPos[0]], + Latency: ltc[featuresPos[1]], + Kbps: kbps[featuresPos[2]], + Mtu: mtu[featuresPos[3]], + MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]], + ReqSizeBytes: reqSizeBytes[featuresPos[5]], + RespSizeBytes: respSizeBytes[featuresPos[6]], + EnableCompressor: enableCompressor[featuresPos[7]], + } + + grpc.EnableTracing = enableTrace[featuresPos[0]] + if runMode[0] { + unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s) + s.SetBenchmarkResult("Unary", benchFeature, results.N, + results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) + fmt.Println(s.BenchString()) + fmt.Println(s.String()) + resultSlice = append(resultSlice, s.GetBenchmarkResults()) + s.Clear() + } + if runMode[1] { + streamBenchmark(startTimer, stopTimer, benchFeature, 
benchtime, s) + s.SetBenchmarkResult("Stream", benchFeature, results.N, + results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) + fmt.Println(s.BenchString()) + fmt.Println(s.String()) + resultSlice = append(resultSlice, s.GetBenchmarkResults()) + s.Clear() + } + bm.AddOne(featuresPos, featuresNum) + } + after(resultSlice) +} + +func before() { + if memProfile != "" { + runtime.MemProfileRate = memProfileRate + } + if cpuProfile != "" { + f, err := os.Create(cpuProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) + f.Close() + return + } + } +} + +func after(data []stats.BenchResults) { + if cpuProfile != "" { + pprof.StopCPUProfile() // flushes profile to disk + } + if memProfile != "" { + f, err := os.Create(memProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + runtime.GC() // materialize all statistics + if err = pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err) + os.Exit(2) + } + f.Close() + } + if benchmarkResultFile != "" { + f, err := os.Create(benchmarkResultFile) + if err != nil { + log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err) + } + dataEncoder := gob.NewEncoder(f) + dataEncoder.Encode(data) + f.Close() + } +} + +// nopCompressor is a compressor that just copies data. +type nopCompressor struct{} + +func (nopCompressor) Do(w io.Writer, p []byte) error { + n, err := w.Write(p) + if err != nil { + return err + } + if n != len(p) { + return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) + } + return nil +} + +func (nopCompressor) Type() string { return "nop" } + +// nopDecompressor is a decompressor that just copies data. +type nopDecompressor struct{} + +func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } +func (nopDecompressor) Type() string { return "nop" } diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark.go b/vendor/google.golang.org/grpc/benchmark/benchmark.go index fc3304acc46d..f09fa4543f9e 100644 --- a/vendor/google.golang.org/grpc/benchmark/benchmark.go +++ b/vendor/google.golang.org/grpc/benchmark/benchmark.go @@ -1,36 +1,23 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing grpc_testing/control.proto grpc_testing/messages.proto grpc_testing/payloads.proto grpc_testing/services.proto grpc_testing/stats.proto + /* Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. */ @@ -40,13 +27,29 @@ import ( "fmt" "io" "net" + "sync" + "testing" + "time" "golang.org/x/net/context" "google.golang.org/grpc" testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/latency" + "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/grpclog" ) +// AddOne add 1 to the features slice +func AddOne(features []int, featuresMaxPosition []int) { + for i := len(features) - 1; i >= 0; i-- { + features[i] = (features[i] + 1) + if features[i]/featuresMaxPosition[i] == 0 { + break + } + features[i] = features[i] % featuresMaxPosition[i] + } +} + // Allows reuse of the same testpb.Payload object. func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) { if size < 0 { @@ -144,6 +147,9 @@ type ServerInfo struct { // For "protobuf", it's ignored. // For "bytebuf", it should be an int representing response size. Metadata interface{} + + // Network can simulate latency + Network *latency.Network } // StartServer starts a gRPC server serving a benchmark service according to info. @@ -153,6 +159,12 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) { if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } + nw := info.Network + if nw != nil { + lis = nw.Listener(lis) + } + opts = append(opts, grpc.WriteBufferSize(128*1024)) + opts = append(opts, grpc.ReadBufferSize(128*1024)) s := grpc.NewServer(opts...) switch info.Type { case "protobuf": @@ -226,9 +238,127 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli // NewClientConn creates a gRPC client connection to addr. func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { + opts = append(opts, grpc.WithWriteBufferSize(128*1024)) + opts = append(opts, grpc.WithReadBufferSize(128*1024)) conn, err := grpc.Dial(addr, opts...) 
if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } return conn } + +func runUnary(b *testing.B, benchFeatures stats.Features) { + s := stats.AddStats(b, 38) + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + defer stopper() + conn := NewClientConn( + target, grpc.WithInsecure(), + grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + }), + ) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. + for i := 0; i < 10; i++ { + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + } + ch := make(chan int, benchFeatures.MaxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. + for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + go func() { + for range ch { + start := time.Now() + unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch <- i + } + close(ch) + wg.Wait() + b.StopTimer() + conn.Close() +} + +func runStream(b *testing.B, benchFeatures stats.Features) { + s := stats.AddStats(b, 38) + nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) + defer stopper() + conn := NewClientConn( + target, grpc.WithInsecure(), + grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) + }), + ) + tc := testpb.NewBenchmarkServiceClient(conn) + + // Warm up connection. + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + for i := 0; i < 10; i++ { + streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + } + + ch := make(chan struct{}, benchFeatures.MaxConcurrentCalls*4) + var ( + mu sync.Mutex + wg sync.WaitGroup + ) + wg.Add(benchFeatures.MaxConcurrentCalls) + + // Distribute the b.N calls over maxConcurrentCalls workers. 
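+	// Each worker owns one stream and keeps issuing round trips until the
+	// channel is closed after the main goroutine has queued b.N calls.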
+ for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { + stream, err := tc.StreamingCall(context.Background()) + if err != nil { + b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + go func() { + for range ch { + start := time.Now() + streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) + elapse := time.Since(start) + mu.Lock() + s.Add(elapse) + mu.Unlock() + } + wg.Done() + }() + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch <- struct{}{} + } + close(ch) + wg.Wait() + b.StopTimer() + conn.Close() +} +func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { + if err := DoUnaryCall(client, reqSize, respSize); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } +} + +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { + if err := DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark_test.go b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go index 8fe3fa1581ad..8dc7d3c55889 100644 --- a/vendor/google.golang.org/grpc/benchmark/benchmark_test.go +++ b/vendor/google.golang.org/grpc/benchmark/benchmark_test.go @@ -1,200 +1,83 @@ +// +build go1.7 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package benchmark import ( + "fmt" "os" - "sync" + "reflect" "testing" "time" - "golang.org/x/net/context" "google.golang.org/grpc" - testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" - "google.golang.org/grpc/grpclog" ) -func runUnary(b *testing.B, maxConcurrentCalls int) { - s := stats.AddStats(b, 38) - b.StopTimer() - target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) - defer stopper() - conn := NewClientConn(target, grpc.WithInsecure()) - tc := testpb.NewBenchmarkServiceClient(conn) - - // Warm up connection. - for i := 0; i < 10; i++ { - unaryCaller(tc) - } - ch := make(chan int, maxConcurrentCalls*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(maxConcurrentCalls) - - // Distribute the b.N calls over maxConcurrentCalls workers. - for i := 0; i < maxConcurrentCalls; i++ { - go func() { - for range ch { - start := time.Now() - unaryCaller(tc) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() - } - b.StartTimer() - for i := 0; i < b.N; i++ { - ch <- i - } - b.StopTimer() - close(ch) - wg.Wait() - conn.Close() -} - -func runStream(b *testing.B, maxConcurrentCalls int) { - s := stats.AddStats(b, 38) - b.StopTimer() - target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) - defer stopper() - conn := NewClientConn(target, grpc.WithInsecure()) - tc := testpb.NewBenchmarkServiceClient(conn) - - // Warm up connection. 
- stream, err := tc.StreamingCall(context.Background()) - if err != nil { - b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - for i := 0; i < 10; i++ { - streamCaller(stream) - } - - ch := make(chan int, maxConcurrentCalls*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(maxConcurrentCalls) - - // Distribute the b.N calls over maxConcurrentCalls workers. - for i := 0; i < maxConcurrentCalls; i++ { - go func() { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - for range ch { - start := time.Now() - streamCaller(stream) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() +func BenchmarkClient(b *testing.B) { + enableTrace := []bool{true, false} // run both enable and disable by default + // When set the latency to 0 (no delay), the result is slower than the real result with no delay + // because latency simulation section has extra operations + latency := []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. + kbps := []int{0, 10240} // if non-positive, infinite + mtu := []int{0} // if non-positive, infinite + maxConcurrentCalls := []int{1, 8, 64, 512} + reqSizeBytes := []int{1, 1024 * 1024} + respSizeBytes := []int{1, 1024 * 1024} + featuresCurPos := make([]int, 7) + + // 0:enableTracing 1:md 2:ltc 3:kbps 4:mtu 5:maxC 6:connCount 7:reqSize 8:respSize + featuresMaxPosition := []int{len(enableTrace), len(latency), len(kbps), len(mtu), len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes)} + initalPos := make([]int, len(featuresCurPos)) + + // run benchmarks + start := true + for !reflect.DeepEqual(featuresCurPos, initalPos) || start { + start = false + tracing := "Trace" + if !enableTrace[featuresCurPos[0]] { + tracing = "noTrace" + } + + benchFeature := stats.Features{ + EnableTrace: enableTrace[featuresCurPos[0]], + Latency: latency[featuresCurPos[1]], + Kbps: kbps[featuresCurPos[2]], + Mtu: mtu[featuresCurPos[3]], + MaxConcurrentCalls: maxConcurrentCalls[featuresCurPos[4]], + ReqSizeBytes: reqSizeBytes[featuresCurPos[5]], + RespSizeBytes: respSizeBytes[featuresCurPos[6]], + } + + grpc.EnableTracing = enableTrace[featuresCurPos[0]] + b.Run(fmt.Sprintf("Unary-%s-%s", + tracing, benchFeature.String()), func(b *testing.B) { + runUnary(b, benchFeature) + }) + + b.Run(fmt.Sprintf("Stream-%s-%s", + tracing, benchFeature.String()), func(b *testing.B) { + runStream(b, benchFeature) + }) + AddOne(featuresCurPos, featuresMaxPosition) } - b.StartTimer() - for i := 0; i < b.N; i++ { - ch <- i - } - b.StopTimer() - close(ch) - wg.Wait() - conn.Close() -} -func unaryCaller(client testpb.BenchmarkServiceClient) { - if err := DoUnaryCall(client, 1, 1); err != nil { - grpclog.Fatalf("DoUnaryCall failed: %v", err) - } -} - -func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { - if err := DoStreamingRoundTrip(stream, 1, 1); err != nil { - grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) - } -} - -func BenchmarkClientStreamc1(b *testing.B) { - grpc.EnableTracing = true - runStream(b, 1) -} - -func BenchmarkClientStreamc8(b *testing.B) { - grpc.EnableTracing = true - runStream(b, 8) -} - -func BenchmarkClientStreamc64(b *testing.B) { - grpc.EnableTracing = true - runStream(b, 64) -} - -func BenchmarkClientStreamc512(b *testing.B) { - grpc.EnableTracing = true - runStream(b, 512) -} -func BenchmarkClientUnaryc1(b *testing.B) { - grpc.EnableTracing = true - runUnary(b, 1) -} - -func BenchmarkClientUnaryc8(b *testing.B) { - 
-	grpc.EnableTracing = true
-	runUnary(b, 8)
-}
-
-func BenchmarkClientUnaryc64(b *testing.B) {
-	grpc.EnableTracing = true
-	runUnary(b, 64)
-}
-
-func BenchmarkClientUnaryc512(b *testing.B) {
-	grpc.EnableTracing = true
-	runUnary(b, 512)
-}
-
-func BenchmarkClientStreamNoTracec1(b *testing.B) {
-	grpc.EnableTracing = false
-	runStream(b, 1)
-}
-
-func BenchmarkClientStreamNoTracec8(b *testing.B) {
-	grpc.EnableTracing = false
-	runStream(b, 8)
-}
-
-func BenchmarkClientStreamNoTracec64(b *testing.B) {
-	grpc.EnableTracing = false
-	runStream(b, 64)
-}
-
-func BenchmarkClientStreamNoTracec512(b *testing.B) {
-	grpc.EnableTracing = false
-	runStream(b, 512)
-}
-func BenchmarkClientUnaryNoTracec1(b *testing.B) {
-	grpc.EnableTracing = false
-	runUnary(b, 1)
-}
-
-func BenchmarkClientUnaryNoTracec8(b *testing.B) {
-	grpc.EnableTracing = false
-	runUnary(b, 8)
-}
-
-func BenchmarkClientUnaryNoTracec64(b *testing.B) {
-	grpc.EnableTracing = false
-	runUnary(b, 64)
-}
-
-func BenchmarkClientUnaryNoTracec512(b *testing.B) {
-	grpc.EnableTracing = false
-	runUnary(b, 512)
 }
 
 func TestMain(m *testing.M) {
diff --git a/vendor/google.golang.org/grpc/benchmark/benchresult/main.go b/vendor/google.golang.org/grpc/benchmark/benchresult/main.go
new file mode 100644
index 000000000000..40226cff15b9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/benchmark/benchresult/main.go
@@ -0,0 +1,133 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
+To format the benchmark result:
+	go run benchmark/benchresult/main.go resultfile
+
+To see the performance change based on an old result:
+	go run benchmark/benchresult/main.go resultfile_old resultfile
+It will print the comparison result of intersection benchmarks between two files.
+ +*/ +package main + +import ( + "encoding/gob" + "fmt" + "log" + "os" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/benchmark/stats" +) + +func createMap(fileName string, m map[string]stats.BenchResults) { + f, err := os.Open(fileName) + if err != nil { + log.Fatalf("Read file %s error: %s\n", fileName, err) + } + defer f.Close() + var data []stats.BenchResults + decoder := gob.NewDecoder(f) + if err = decoder.Decode(&data); err != nil { + log.Fatalf("Decode file %s error: %s\n", fileName, err) + } + for _, d := range data { + m[d.RunMode+"-"+d.Features.String()] = d + } +} + +func intChange(title string, val1, val2 int64) string { + return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", title, strconv.FormatInt(val1, 10), + strconv.FormatInt(val2, 10), float64(val2-val1)*100/float64(val1)) +} + +func timeChange(title int, val1, val2 time.Duration) string { + return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", strconv.Itoa(title)+" latency", val1.String(), + val2.String(), float64(val2-val1)*100/float64(val1)) +} + +func compareTwoMap(m1, m2 map[string]stats.BenchResults) { + for k2, v2 := range m2 { + if v1, ok := m1[k2]; ok { + changes := k2 + "\n" + changes += fmt.Sprintf("%10s %12s %12s %8s\n", "Title", "Before", "After", "Percentage") + changes += intChange("Bytes/op", v1.AllocedBytesPerOp, v2.AllocedBytesPerOp) + changes += intChange("Allocs/op", v1.AllocsPerOp, v2.AllocsPerOp) + changes += timeChange(v1.Latency[1].Percent, v1.Latency[1].Value, v2.Latency[1].Value) + changes += timeChange(v1.Latency[2].Percent, v1.Latency[2].Value, v2.Latency[2].Value) + fmt.Printf("%s\n", changes) + } + } +} + +func compareBenchmark(file1, file2 string) { + var BenchValueFile1 map[string]stats.BenchResults + var BenchValueFile2 map[string]stats.BenchResults + BenchValueFile1 = make(map[string]stats.BenchResults) + BenchValueFile2 = make(map[string]stats.BenchResults) + + createMap(file1, BenchValueFile1) + createMap(file2, BenchValueFile2) + + compareTwoMap(BenchValueFile1, BenchValueFile2) +} + +func printline(benchName, ltc50, ltc90, allocByte, allocsOp interface{}) { + fmt.Printf("%-80v%12v%12v%12v%12v\n", benchName, ltc50, ltc90, allocByte, allocsOp) +} + +func formatBenchmark(fileName string) { + f, err := os.Open(fileName) + if err != nil { + log.Fatalf("Read file %s error: %s\n", fileName, err) + } + defer f.Close() + var data []stats.BenchResults + decoder := gob.NewDecoder(f) + if err = decoder.Decode(&data); err != nil { + log.Fatalf("Decode file %s error: %s\n", fileName, err) + } + if len(data) == 0 { + log.Fatalf("No data in file %s\n", fileName) + } + printPos := data[0].SharedPosion + fmt.Println("\nShared features:\n" + strings.Repeat("-", 20)) + fmt.Print(stats.PartialPrintString(printPos, data[0].Features, true)) + fmt.Println(strings.Repeat("-", 35)) + for i := 0; i < len(data[0].SharedPosion); i++ { + printPos[i] = !printPos[i] + } + printline("Name", "latency-50", "latency-90", "Alloc (B)", "Alloc (#)") + for _, d := range data { + name := d.RunMode + stats.PartialPrintString(printPos, d.Features, false) + printline(name, d.Latency[1].Value.String(), d.Latency[2].Value.String(), + d.AllocedBytesPerOp, d.AllocsPerOp) + } +} + +func main() { + if len(os.Args) == 2 { + formatBenchmark(os.Args[1]) + } else { + compareBenchmark(os.Args[1], os.Args[2]) + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/client/main.go b/vendor/google.golang.org/grpc/benchmark/client/main.go index c63d521263a2..9aa587e3a2df 100644 --- a/vendor/google.golang.org/grpc/benchmark/client/main.go 
+++ b/vendor/google.golang.org/grpc/benchmark/client/main.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package main import ( diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go index 8afae814544b..b88832f75a2e 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: control.proto -// DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. @@ -147,6 +146,13 @@ func (m *PoissonParams) String() string { return proto.CompactTextStr func (*PoissonParams) ProtoMessage() {} func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *PoissonParams) GetOfferedLoad() float64 { + if m != nil { + return m.OfferedLoad + } + return 0 +} + type UniformParams struct { InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"` InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"` @@ -157,6 +163,20 @@ func (m *UniformParams) String() string { return proto.CompactTextStr func (*UniformParams) ProtoMessage() {} func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *UniformParams) GetInterarrivalLo() float64 { + if m != nil { + return m.InterarrivalLo + } + return 0 +} + +func (m *UniformParams) GetInterarrivalHi() float64 { + if m != nil { + return m.InterarrivalHi + } + return 0 +} + type DeterministicParams struct { OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` } @@ -166,6 +186,13 @@ func (m *DeterministicParams) String() string { return proto.CompactT func (*DeterministicParams) ProtoMessage() {} func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *DeterministicParams) GetOfferedLoad() float64 { + if m != nil { + return m.OfferedLoad + } + return 0 +} + type ParetoParams struct { InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" json:"interarrival_base,omitempty"` Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"` @@ -176,6 +203,20 @@ func (m *ParetoParams) String() string { return proto.CompactTextStri func (*ParetoParams) ProtoMessage() {} func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *ParetoParams) GetInterarrivalBase() float64 { + if m != nil { + return m.InterarrivalBase + } + return 0 +} + +func (m *ParetoParams) GetAlpha() float64 { + if m != nil { + return m.Alpha + } + return 0 +} + // Once an RPC finishes, immediately start a new one. 
// No configuration parameters needed. type ClosedLoopParams struct { @@ -411,6 +452,20 @@ func (m *SecurityParams) String() string { return proto.CompactTextSt func (*SecurityParams) ProtoMessage() {} func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *SecurityParams) GetUseTestCa() bool { + if m != nil { + return m.UseTestCa + } + return false +} + +func (m *SecurityParams) GetServerHostOverride() string { + if m != nil { + return m.ServerHostOverride + } + return "" +} + type ClientConfig struct { // List of targets to connect to. At least one target needs to be specified. ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"` @@ -430,7 +485,7 @@ type ClientConfig struct { PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"` // Specify the cores we should run the client on, if desired - CoreList []int32 `protobuf:"varint,13,rep,name=core_list,json=coreList" json:"core_list,omitempty"` + CoreList []int32 `protobuf:"varint,13,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` } @@ -439,6 +494,20 @@ func (m *ClientConfig) String() string { return proto.CompactTextStri func (*ClientConfig) ProtoMessage() {} func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *ClientConfig) GetServerTargets() []string { + if m != nil { + return m.ServerTargets + } + return nil +} + +func (m *ClientConfig) GetClientType() ClientType { + if m != nil { + return m.ClientType + } + return ClientType_SYNC_CLIENT +} + func (m *ClientConfig) GetSecurityParams() *SecurityParams { if m != nil { return m.SecurityParams @@ -446,6 +515,34 @@ func (m *ClientConfig) GetSecurityParams() *SecurityParams { return nil } +func (m *ClientConfig) GetOutstandingRpcsPerChannel() int32 { + if m != nil { + return m.OutstandingRpcsPerChannel + } + return 0 +} + +func (m *ClientConfig) GetClientChannels() int32 { + if m != nil { + return m.ClientChannels + } + return 0 +} + +func (m *ClientConfig) GetAsyncClientThreads() int32 { + if m != nil { + return m.AsyncClientThreads + } + return 0 +} + +func (m *ClientConfig) GetRpcType() RpcType { + if m != nil { + return m.RpcType + } + return RpcType_UNARY +} + func (m *ClientConfig) GetLoadParams() *LoadParams { if m != nil { return m.LoadParams @@ -467,6 +564,20 @@ func (m *ClientConfig) GetHistogramParams() *HistogramParams { return nil } +func (m *ClientConfig) GetCoreList() []int32 { + if m != nil { + return m.CoreList + } + return nil +} + +func (m *ClientConfig) GetCoreLimit() int32 { + if m != nil { + return m.CoreLimit + } + return 0 +} + type ClientStatus struct { Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` } @@ -494,6 +605,13 @@ func (m *Mark) String() string { return proto.CompactTextString(m) } func (*Mark) ProtoMessage() {} func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (m *Mark) GetReset_() bool { + if m != nil { + return m.Reset_ + } + return false +} + type ClientArgs struct { // Types that are valid to be assigned to Argtype: // *ClientArgs_Setup @@ -627,7 +745,7 @@ type ServerConfig struct { // payload config, used in generic 
server PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` // Specify the cores we should run the server on, if desired - CoreList []int32 `protobuf:"varint,10,rep,name=core_list,json=coreList" json:"core_list,omitempty"` + CoreList []int32 `protobuf:"varint,10,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` } func (m *ServerConfig) Reset() { *m = ServerConfig{} } @@ -635,6 +753,13 @@ func (m *ServerConfig) String() string { return proto.CompactTextStri func (*ServerConfig) ProtoMessage() {} func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *ServerConfig) GetServerType() ServerType { + if m != nil { + return m.ServerType + } + return ServerType_SYNC_SERVER +} + func (m *ServerConfig) GetSecurityParams() *SecurityParams { if m != nil { return m.SecurityParams @@ -642,6 +767,27 @@ func (m *ServerConfig) GetSecurityParams() *SecurityParams { return nil } +func (m *ServerConfig) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ServerConfig) GetAsyncServerThreads() int32 { + if m != nil { + return m.AsyncServerThreads + } + return 0 +} + +func (m *ServerConfig) GetCoreLimit() int32 { + if m != nil { + return m.CoreLimit + } + return 0 +} + func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { if m != nil { return m.PayloadConfig @@ -649,6 +795,13 @@ func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { return nil } +func (m *ServerConfig) GetCoreList() []int32 { + if m != nil { + return m.CoreList + } + return nil +} + type ServerArgs struct { // Types that are valid to be assigned to Argtype: // *ServerArgs_Setup @@ -790,6 +943,20 @@ func (m *ServerStatus) GetStats() *ServerStats { return nil } +func (m *ServerStatus) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ServerStatus) GetCores() int32 { + if m != nil { + return m.Cores + } + return 0 +} + type CoreRequest struct { } @@ -808,6 +975,13 @@ func (m *CoreResponse) String() string { return proto.CompactTextStri func (*CoreResponse) ProtoMessage() {} func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *CoreResponse) GetCores() int32 { + if m != nil { + return m.Cores + } + return 0 +} + type Void struct { } @@ -841,6 +1015,13 @@ func (m *Scenario) String() string { return proto.CompactTextString(m func (*Scenario) ProtoMessage() {} func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *Scenario) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *Scenario) GetClientConfig() *ClientConfig { if m != nil { return m.ClientConfig @@ -848,6 +1029,13 @@ func (m *Scenario) GetClientConfig() *ClientConfig { return nil } +func (m *Scenario) GetNumClients() int32 { + if m != nil { + return m.NumClients + } + return 0 +} + func (m *Scenario) GetServerConfig() *ServerConfig { if m != nil { return m.ServerConfig @@ -855,6 +1043,34 @@ func (m *Scenario) GetServerConfig() *ServerConfig { return nil } +func (m *Scenario) GetNumServers() int32 { + if m != nil { + return m.NumServers + } + return 0 +} + +func (m *Scenario) GetWarmupSeconds() int32 { + if m != nil { + return m.WarmupSeconds + } + return 0 +} + +func (m *Scenario) GetBenchmarkSeconds() int32 { + if m != nil { + return m.BenchmarkSeconds + } + return 0 +} + +func (m *Scenario) GetSpawnLocalWorkerCount() int32 { + if m != nil { + return m.SpawnLocalWorkerCount + } + return 0 
+} + // A set of scenarios to be run with qps_json_driver type Scenarios struct { Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"` @@ -900,78 +1116,79 @@ func init() { func init() { proto.RegisterFile("control.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1162 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0xdb, 0x46, - 0x13, 0x8d, 0x14, 0xc9, 0x96, 0x86, 0x92, 0xac, 0x6f, 0xbf, 0xa4, 0x60, 0x1c, 0x27, 0x6d, 0xd8, - 0x16, 0x0d, 0x5c, 0xc0, 0x29, 0xd4, 0x02, 0x69, 0xd1, 0x8b, 0x40, 0x56, 0x85, 0xd8, 0x80, 0xe3, - 0xba, 0x2b, 0x27, 0x45, 0xae, 0x08, 0x9a, 0x5a, 0x4b, 0x44, 0x24, 0x2e, 0xbb, 0x4b, 0xc6, 0xf0, - 0x2b, 0xf4, 0x99, 0xfa, 0x1c, 0x7d, 0x8d, 0xbe, 0x42, 0x67, 0xff, 0x64, 0x52, 0x11, 0x10, 0xb7, - 0xbd, 0xe3, 0xce, 0x9c, 0xb3, 0x3b, 0x3b, 0x67, 0x66, 0x96, 0xd0, 0x8d, 0x79, 0x9a, 0x0b, 0xbe, - 0x38, 0xc8, 0x04, 0xcf, 0x39, 0xe9, 0xcc, 0x44, 0x16, 0x1f, 0xe4, 0x4c, 0xe6, 0x49, 0x3a, 0xdb, - 0xed, 0x65, 0xd1, 0xf5, 0x82, 0x47, 0x53, 0x69, 0xbc, 0xbb, 0x9e, 0xcc, 0xa3, 0xdc, 0x2e, 0x82, - 0x01, 0x74, 0xcf, 0x78, 0x22, 0x25, 0x4f, 0xcf, 0x22, 0x11, 0x2d, 0x25, 0x79, 0x02, 0x1d, 0x7e, - 0x79, 0xc9, 0x04, 0x9b, 0x86, 0x8a, 0xe4, 0xd7, 0x3e, 0xab, 0x3d, 0xad, 0x51, 0xcf, 0xda, 0x4e, - 0xd0, 0x14, 0x44, 0xd0, 0x7d, 0x9d, 0x26, 0x97, 0x5c, 0x2c, 0x2d, 0xe7, 0x2b, 0xd8, 0x49, 0xd2, - 0x9c, 0x89, 0x48, 0x88, 0xe4, 0x7d, 0xb4, 0x40, 0xa2, 0xa5, 0xf5, 0xca, 0xe6, 0x13, 0xfe, 0x01, - 0x70, 0x9e, 0xf8, 0xf5, 0x0f, 0x81, 0x47, 0x49, 0xf0, 0x3d, 0xfc, 0xff, 0x27, 0x86, 0x96, 0x65, - 0x92, 0x26, 0x78, 0x8b, 0xf8, 0xf6, 0xc1, 0xfd, 0x02, 0x1d, 0x04, 0xb3, 0x9c, 0x5b, 0xca, 0xd7, - 0xf0, 0xbf, 0xca, 0x91, 0x17, 0x91, 0x64, 0x96, 0xd7, 0x2f, 0x3b, 0x0e, 0xd1, 0x4e, 0xee, 0x41, - 0x33, 0x5a, 0x64, 0xf3, 0xc8, 0x46, 0x65, 0x16, 0x01, 0x81, 0xfe, 0x68, 0xc1, 0xa5, 0x3a, 0x80, - 0x67, 0x66, 0xdb, 0xe0, 0x8f, 0x3a, 0x80, 0x3a, 0xcf, 0x9e, 0x32, 0x04, 0x2f, 0xd6, 0x10, 0x8c, - 0x8b, 0x67, 0x7a, 0x7f, 0x6f, 0xf0, 0xf8, 0xa0, 0xac, 0xc3, 0xc1, 0xfa, 0x1e, 0x47, 0x77, 0x28, - 0xc4, 0x2b, 0x1b, 0x79, 0x0e, 0xdb, 0x99, 0x51, 0x42, 0x9f, 0xee, 0x0d, 0x1e, 0x56, 0xe9, 0x15, - 0x99, 0x90, 0xeb, 0xd0, 0x8a, 0x58, 0x18, 0x39, 0xfc, 0xbb, 0x9b, 0x88, 0x15, 0xad, 0x14, 0xd1, - 0xa2, 0xc9, 0x8f, 0xb0, 0x35, 0xd5, 0x49, 0xf6, 0x1b, 0x9a, 0xf7, 0xa4, 0xca, 0xdb, 0x20, 0x00, - 0xb2, 0x2d, 0x85, 0x7c, 0x07, 0x5b, 0x99, 0xce, 0xb3, 0xdf, 0xd4, 0xe4, 0xdd, 0xb5, 0x68, 0x4b, - 0x1a, 0x28, 0x96, 0xc1, 0x1e, 0x6e, 0x41, 0x43, 0x09, 0x17, 0x5c, 0x40, 0x6f, 0xc2, 0xe2, 0x42, - 0x24, 0xf9, 0xb5, 0xcd, 0xe0, 0x63, 0xf0, 0x0a, 0xc9, 0x42, 0xc5, 0x0f, 0xe3, 0x48, 0x67, 0xb0, - 0x45, 0xdb, 0x68, 0x3a, 0x47, 0xcb, 0x28, 0x22, 0xdf, 0xc0, 0x3d, 0xc9, 0xc4, 0x7b, 0x26, 0xc2, - 0x39, 0x47, 0x08, 0xc7, 0x2f, 0x91, 0x4c, 0x99, 0xce, 0x55, 0x9b, 0x12, 0xe3, 0x3b, 0x42, 0xd7, - 0xcf, 0xd6, 0x13, 0xfc, 0xde, 0x84, 0xce, 0x68, 0x91, 0xb0, 0x34, 0x1f, 0xf1, 0xf4, 0x32, 0x99, - 0x91, 0x2f, 0xa1, 0x67, 0xb7, 0xc8, 0x23, 0x31, 0x63, 0xb9, 0xc4, 0x53, 0xee, 0x22, 0xb9, 0x6b, - 0xac, 0xe7, 0xc6, 0x48, 0x7e, 0x50, 0x5a, 0x2a, 0x5a, 0x98, 0x5f, 0x67, 0xe6, 0x80, 0xde, 0xc0, - 0x5f, 0xd7, 0x52, 0x01, 0xce, 0xd1, 0xaf, 0x34, 0x74, 0xdf, 0x64, 0x0c, 0x3b, 0xd2, 0x5e, 0x2b, - 0xcc, 0xf4, 0xbd, 0xac, 0x24, 0x7b, 0x55, 0x7a, 0xf5, 0xee, 0xb4, 0x27, 0xab, 0xb9, 0x78, 0x01, - 0x7b, 0xbc, 0xc8, 0xb1, 0x4d, 0xd3, 0x29, 0xa2, 0x43, 0x64, 0xca, 0x30, 0xc3, 0xb0, 0xe3, 0x79, - 0x94, 0xa6, 0x6c, 0xa1, 0xe5, 0x6a, 0xd2, 0x07, 0x25, 0x0c, 
0x45, 0xc8, 0x19, 0x13, 0x23, 0x03, - 0x50, 0x7d, 0x66, 0xaf, 0x60, 0x29, 0x52, 0xab, 0xd4, 0xa4, 0x3d, 0x63, 0xb6, 0x38, 0xa9, 0xb2, - 0x1a, 0xc9, 0xeb, 0x34, 0x0e, 0xdd, 0x8d, 0xe7, 0x82, 0xe1, 0xa4, 0xf0, 0xb7, 0x35, 0x9a, 0x68, - 0x9f, 0xbd, 0xab, 0xf1, 0x20, 0xa3, 0x85, 0xf1, 0x98, 0xd4, 0xb4, 0x74, 0x6a, 0xee, 0x57, 0xef, - 0x86, 0xa1, 0xe8, 0xbc, 0x6c, 0x0b, 0xf3, 0xa1, 0xf2, 0xa9, 0x34, 0x77, 0x09, 0x01, 0x9d, 0x90, - 0xb5, 0x7c, 0xde, 0xb4, 0x12, 0x85, 0xc5, 0x4d, 0x5b, 0x1d, 0x82, 0x1b, 0x5e, 0x61, 0xac, 0x35, - 0xf4, 0xbd, 0x8d, 0xad, 0x61, 0x30, 0x46, 0x66, 0xda, 0xcd, 0xca, 0x4b, 0x72, 0x04, 0xfd, 0x39, - 0x96, 0x30, 0x9f, 0xe1, 0x8e, 0x2e, 0x86, 0x8e, 0xde, 0xe5, 0x51, 0x75, 0x97, 0x23, 0x87, 0xb2, - 0x81, 0xec, 0xcc, 0xab, 0x06, 0xf2, 0x10, 0xda, 0x31, 0x17, 0x2c, 0x5c, 0xa0, 0xdd, 0xef, 0x62, - 0xe9, 0x34, 0x69, 0x4b, 0x19, 0x4e, 0x70, 0x4d, 0x1e, 0x01, 0x58, 0xe7, 0x32, 0xc9, 0xfd, 0x9e, - 0xce, 0x5f, 0xdb, 0x78, 0xd1, 0x10, 0xbc, 0x70, 0xb5, 0x38, 0xc1, 0xe1, 0x5b, 0x48, 0xf2, 0x0c, - 0x9a, 0x7a, 0x0c, 0xdb, 0x51, 0xf1, 0x60, 0x53, 0x79, 0x29, 0xa8, 0xa4, 0x06, 0x17, 0xec, 0x41, - 0xe3, 0x55, 0x24, 0xde, 0xa9, 0x11, 0x25, 0x98, 0x64, 0xb9, 0xed, 0x10, 0xb3, 0x08, 0x0a, 0x00, - 0xc3, 0x19, 0x8a, 0x99, 0x24, 0x03, 0xdc, 0x9c, 0xe5, 0x85, 0x9b, 0x43, 0xbb, 0x9b, 0x36, 0x37, - 0xd9, 0xc1, 0xd6, 0x34, 0x50, 0xf2, 0x14, 0x1a, 0x4b, 0xdc, 0xdf, 0xce, 0x1e, 0x52, 0xa5, 0xa8, - 0x93, 0x11, 0xaa, 0x11, 0x87, 0x6d, 0xd8, 0xc6, 0x4e, 0x51, 0x05, 0x10, 0xfc, 0x59, 0x87, 0xce, - 0x44, 0x37, 0x8f, 0x4d, 0x36, 0x6a, 0xed, 0x5a, 0x4c, 0x15, 0x48, 0x6d, 0x53, 0xef, 0x18, 0x82, - 0xe9, 0x1d, 0xb9, 0xfa, 0xde, 0xd4, 0x3b, 0xf5, 0x7f, 0xd1, 0x3b, 0x04, 0x1a, 0x19, 0x17, 0xb9, - 0xed, 0x11, 0xfd, 0x7d, 0x53, 0xe5, 0x2e, 0xb6, 0x0d, 0x55, 0x6e, 0xa3, 0xb2, 0x55, 0x5e, 0x55, - 0xb3, 0xb5, 0xa6, 0xe6, 0x86, 0xba, 0x6c, 0xff, 0xe3, 0xba, 0xac, 0x54, 0x13, 0x54, 0xab, 0x49, - 0xe9, 0x69, 0x02, 0xba, 0x85, 0x9e, 0x65, 0x01, 0xfe, 0xa3, 0x9e, 0x89, 0x93, 0xf3, 0x56, 0x55, - 0x7a, 0x03, 0x75, 0x55, 0xba, 0xca, 0x7e, 0xbd, 0x94, 0x7d, 0xac, 0x58, 0x75, 0x2f, 0x33, 0x0a, - 0x9b, 0xd4, 0x2c, 0x82, 0x2e, 0x78, 0x23, 0xfc, 0xa0, 0xec, 0xb7, 0x02, 0xb7, 0x0b, 0xbe, 0xc0, - 0xfe, 0xd0, 0x4b, 0x99, 0xf1, 0xd4, 0xbc, 0xc4, 0x86, 0x54, 0x2b, 0x93, 0xf0, 0xf9, 0x78, 0xc3, - 0x93, 0x69, 0xf0, 0x57, 0x1d, 0x5a, 0x93, 0x98, 0xa5, 0x91, 0x48, 0xb8, 0x3a, 0x33, 0x8d, 0x96, - 0xa6, 0xd8, 0xda, 0x54, 0x7f, 0xe3, 0x04, 0xed, 0xba, 0x01, 0x68, 0xf4, 0xa9, 0x7f, 0xac, 0x13, - 0x68, 0x27, 0x2e, 0xbf, 0x15, 0x9f, 0x82, 0x97, 0x16, 0x4b, 0x3b, 0x16, 0x5d, 0xe8, 0x80, 0x26, - 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0xee, 0x84, 0xc6, 0xc7, 0xb4, 0xa1, 0x1d, 0x59, 0x6e, 0x15, - 0x7b, 0x82, 0xb1, 0xb9, 0xf9, 0xac, 0x4e, 0x30, 0x1c, 0xa9, 0x9e, 0xab, 0xab, 0x48, 0x2c, 0x8b, - 0x0c, 0x31, 0x78, 0x06, 0xd6, 0xeb, 0x96, 0xc6, 0x74, 0x8d, 0x75, 0x62, 0x8c, 0xea, 0x07, 0xe7, - 0x82, 0xa5, 0xf1, 0x5c, 0x69, 0xb9, 0x42, 0x9a, 0xca, 0xee, 0xaf, 0x1c, 0x0e, 0xfc, 0x1c, 0x7c, - 0x99, 0x45, 0x57, 0x29, 0xfe, 0xa6, 0xc4, 0xf8, 0x33, 0x74, 0xc5, 0xc5, 0x3b, 0x7d, 0x83, 0x22, - 0x75, 0x55, 0x7e, 0x5f, 0xfb, 0x4f, 0x94, 0xfb, 0x57, 0xed, 0x1d, 0x29, 0x67, 0x30, 0x84, 0xb6, - 0x4b, 0xb8, 0xc4, 0xb7, 0xbf, 0x2d, 0xdd, 0x42, 0xbf, 0xa1, 0xde, 0xe0, 0x93, 0xb5, 0x7b, 0x5b, - 0x37, 0xbd, 0x01, 0xee, 0x3f, 0x73, 0x33, 0x4a, 0xb7, 0xfb, 0x0e, 0x78, 0x93, 0xb7, 0xa7, 0xa3, - 0x70, 0x74, 0x72, 0x3c, 0x3e, 0x3d, 0xef, 0xdf, 0x21, 0x7d, 0xe8, 0x0c, 0xcb, 0x96, 0xda, 0xfe, - 0xb1, 0x6b, 0x82, 0x0a, 0x61, 0x32, 0xa6, 0x6f, 0xc6, 0xb4, 0x4c, 0xb0, 0x96, 0x1a, 
0xf1, 0xe1, - 0x9e, 0xb1, 0xbc, 0x1c, 0x9f, 0x8e, 0xe9, 0xf1, 0xca, 0x53, 0xdf, 0xff, 0x1c, 0xb6, 0xed, 0xbb, - 0x44, 0xda, 0xd0, 0x7c, 0x7d, 0x3a, 0xa4, 0x6f, 0x71, 0x87, 0x2e, 0x5e, 0xea, 0x9c, 0x8e, 0x87, - 0xaf, 0x8e, 0x4f, 0x5f, 0xf6, 0x6b, 0x17, 0x5b, 0xfa, 0x97, 0xf8, 0xdb, 0xbf, 0x03, 0x00, 0x00, - 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, + // 1179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6f, 0x6f, 0xdb, 0xb6, + 0x13, 0xb6, 0x1d, 0xdb, 0xb1, 0x4e, 0xb6, 0xe3, 0x1f, 0x7f, 0xe9, 0xa0, 0xa6, 0x69, 0x97, 0x6a, + 0x1b, 0x16, 0x64, 0x40, 0x5a, 0x78, 0x05, 0xba, 0x62, 0x2f, 0x02, 0xc7, 0x33, 0xea, 0x00, 0x69, + 0x96, 0xd1, 0x69, 0x87, 0xbe, 0x12, 0x18, 0x99, 0xb1, 0x85, 0xc8, 0xa2, 0x46, 0x52, 0x09, 0xf2, + 0x15, 0xf6, 0x99, 0xf6, 0x39, 0xf6, 0x35, 0xf6, 0x15, 0x06, 0xfe, 0x91, 0x23, 0xb9, 0x06, 0x9a, + 0x6d, 0xef, 0xc4, 0xbb, 0xe7, 0xe1, 0x91, 0xf7, 0xdc, 0x1d, 0x05, 0x9d, 0x90, 0x25, 0x92, 0xb3, + 0xf8, 0x30, 0xe5, 0x4c, 0x32, 0xd4, 0x9e, 0xf1, 0x34, 0x3c, 0x94, 0x54, 0xc8, 0x28, 0x99, 0xed, + 0x74, 0x53, 0x72, 0x17, 0x33, 0x32, 0x15, 0xc6, 0xbb, 0xe3, 0x0a, 0x49, 0xa4, 0x5d, 0xf8, 0x7d, + 0xe8, 0x9c, 0xb3, 0x48, 0x08, 0x96, 0x9c, 0x13, 0x4e, 0x16, 0x02, 0x3d, 0x87, 0x36, 0xbb, 0xba, + 0xa2, 0x9c, 0x4e, 0x03, 0x45, 0xf2, 0xaa, 0x7b, 0xd5, 0xfd, 0x2a, 0x76, 0xad, 0xed, 0x94, 0x91, + 0xa9, 0x4f, 0xa0, 0xf3, 0x3e, 0x89, 0xae, 0x18, 0x5f, 0x58, 0xce, 0xb7, 0xb0, 0x15, 0x25, 0x92, + 0x72, 0xc2, 0x79, 0x74, 0x43, 0xe2, 0x20, 0x66, 0x96, 0xd6, 0x2d, 0x9a, 0x4f, 0xd9, 0x27, 0xc0, + 0x79, 0xe4, 0xd5, 0x3e, 0x05, 0x8e, 0x23, 0xff, 0x07, 0xf8, 0xff, 0x4f, 0x54, 0x52, 0xbe, 0x88, + 0x92, 0x48, 0xc8, 0x28, 0x7c, 0xf8, 0xe1, 0x7e, 0x81, 0xf6, 0x39, 0xe1, 0x54, 0x32, 0x4b, 0xf9, + 0x0e, 0xfe, 0x57, 0x0a, 0x79, 0x49, 0x04, 0xb5, 0xbc, 0x5e, 0xd1, 0x71, 0x4c, 0x04, 0x45, 0xdb, + 0xd0, 0x20, 0x71, 0x3a, 0x27, 0xf6, 0x54, 0x66, 0xe1, 0x23, 0xe8, 0x0d, 0x63, 0x26, 0x54, 0x00, + 0x96, 0x9a, 0x6d, 0xfd, 0x3f, 0x6a, 0x00, 0x2a, 0x9e, 0x8d, 0x32, 0x00, 0x37, 0xd4, 0x90, 0x20, + 0x66, 0x2c, 0xd5, 0xfb, 0xbb, 0xfd, 0x67, 0x87, 0x45, 0x1d, 0x0e, 0x57, 0xf7, 0x18, 0x57, 0x30, + 0x84, 0x4b, 0x1b, 0x7a, 0x0d, 0x9b, 0xa9, 0x51, 0x42, 0x47, 0x77, 0xfb, 0x4f, 0xca, 0xf4, 0x92, + 0x4c, 0xe3, 0x0a, 0xce, 0xd1, 0x8a, 0x98, 0x19, 0x39, 0xbc, 0x8d, 0x75, 0xc4, 0x92, 0x56, 0x8a, + 0x68, 0xd1, 0xe8, 0x47, 0x68, 0x4e, 0x75, 0x92, 0xbd, 0xba, 0xe6, 0x3d, 0x2f, 0xf3, 0xd6, 0x08, + 0x30, 0xae, 0x60, 0x4b, 0x41, 0xaf, 0xa0, 0x99, 0xea, 0x3c, 0x7b, 0x0d, 0x4d, 0xde, 0x59, 0x39, + 0x6d, 0x41, 0x03, 0xc5, 0x32, 0xd8, 0xe3, 0x26, 0xd4, 0x95, 0x70, 0xfe, 0x25, 0x74, 0x27, 0x34, + 0xcc, 0x78, 0x24, 0xef, 0x6c, 0x06, 0x9f, 0x81, 0x9b, 0x09, 0x1a, 0x28, 0x7e, 0x10, 0x12, 0x9d, + 0xc1, 0x16, 0x76, 0x32, 0x41, 0x2f, 0xa8, 0x90, 0x43, 0x82, 0x5e, 0xc2, 0xb6, 0xa0, 0xfc, 0x86, + 0xf2, 0x60, 0xce, 0x84, 0x0c, 0xd8, 0x0d, 0xe5, 0x3c, 0x9a, 0x52, 0x9d, 0x2b, 0x07, 0x23, 0xe3, + 0x1b, 0x33, 0x21, 0x7f, 0xb6, 0x1e, 0xff, 0xf7, 0x06, 0xb4, 0x87, 0x71, 0x44, 0x13, 0x39, 0x64, + 0xc9, 0x55, 0x34, 0x43, 0xdf, 0x40, 0xd7, 0x6e, 0x21, 0x09, 0x9f, 0x51, 0x29, 0xbc, 0xea, 0xde, + 0xc6, 0xbe, 0x83, 0x3b, 0xc6, 0x7a, 0x61, 0x8c, 0xe8, 0x8d, 0xd2, 0x52, 0xd1, 0x02, 0x79, 0x97, + 0x9a, 0x00, 0xdd, 0xbe, 0xb7, 0xaa, 0xa5, 0x02, 0x5c, 0xdc, 0xa5, 0x54, 0x69, 0x98, 0x7f, 0xa3, + 0x11, 0x6c, 0x09, 0x7b, 0xad, 0x20, 0xd5, 0xf7, 0xb2, 0x92, 0xec, 0x96, 0xe9, 0xe5, 0xbb, 0xe3, + 0xae, 0x28, 0xe7, 0xe2, 0x08, 0x76, 0x59, 0x26, 0x85, 0x24, 0xc9, 0x34, 0x4a, 0x66, 0x01, 0x4f, + 
0x43, 0x11, 0xa4, 0x94, 0x07, 0xe1, 0x9c, 0x24, 0x09, 0x8d, 0xb5, 0x5c, 0x0d, 0xfc, 0xb8, 0x80, + 0xc1, 0x69, 0x28, 0xce, 0x29, 0x1f, 0x1a, 0x80, 0xea, 0x33, 0x7b, 0x05, 0x4b, 0x11, 0x5a, 0xa5, + 0x06, 0xee, 0x1a, 0xb3, 0xc5, 0x09, 0x95, 0x55, 0x22, 0xee, 0x92, 0x30, 0xc8, 0x6f, 0x3c, 0xe7, + 0x94, 0x4c, 0x85, 0xb7, 0xa9, 0xd1, 0x48, 0xfb, 0xec, 0x5d, 0x8d, 0x07, 0xbd, 0x84, 0x16, 0x4f, + 0x43, 0x93, 0x9a, 0x96, 0x4e, 0xcd, 0xa3, 0xf2, 0xdd, 0x70, 0x1a, 0xea, 0xbc, 0x6c, 0x72, 0xf3, + 0xa1, 0xf2, 0xa9, 0x34, 0xcf, 0x13, 0x02, 0x3a, 0x21, 0x2b, 0xf9, 0xbc, 0x6f, 0x25, 0x0c, 0xf1, + 0x7d, 0x5b, 0x1d, 0x43, 0x3e, 0xbc, 0x82, 0x50, 0x6b, 0xe8, 0xb9, 0x6b, 0x5b, 0xc3, 0x60, 0x8c, + 0xcc, 0xb8, 0x93, 0x16, 0x97, 0x68, 0x0c, 0xbd, 0x79, 0x24, 0x24, 0x9b, 0x71, 0xb2, 0xc8, 0xcf, + 0xd0, 0xd6, 0xbb, 0x3c, 0x2d, 0xef, 0x32, 0xce, 0x51, 0xf6, 0x20, 0x5b, 0xf3, 0xb2, 0x01, 0x3d, + 0x01, 0x27, 0x64, 0x9c, 0x06, 0x71, 0x24, 0xa4, 0xd7, 0xd9, 0xdb, 0xd8, 0x6f, 0xe0, 0x96, 0x32, + 0x9c, 0x46, 0x42, 0xa2, 0xa7, 0x00, 0xd6, 0xb9, 0x88, 0xa4, 0xd7, 0xd5, 0xf9, 0x73, 0x8c, 0x77, + 0x11, 0x49, 0xff, 0x28, 0xaf, 0xc5, 0x89, 0x24, 0x32, 0x13, 0xe8, 0x05, 0x34, 0xf4, 0x18, 0xb6, + 0xa3, 0xe2, 0xf1, 0xba, 0xf2, 0x52, 0x50, 0x81, 0x0d, 0xce, 0xdf, 0x85, 0xfa, 0x3b, 0xc2, 0xaf, + 0xd5, 0x88, 0xe2, 0x54, 0x50, 0x69, 0x3b, 0xc4, 0x2c, 0xfc, 0x0c, 0xc0, 0x70, 0x06, 0x7c, 0x26, + 0x50, 0x1f, 0x1a, 0x82, 0xca, 0x2c, 0x9f, 0x43, 0x3b, 0xeb, 0x36, 0x37, 0xd9, 0x19, 0x57, 0xb0, + 0x81, 0xa2, 0x7d, 0xa8, 0x2f, 0x08, 0xbf, 0xb6, 0xb3, 0x07, 0x95, 0x29, 0x2a, 0xf2, 0xb8, 0x82, + 0x35, 0xe2, 0xd8, 0x81, 0x4d, 0xc2, 0x67, 0xaa, 0x00, 0xfc, 0x3f, 0x6b, 0xd0, 0x9e, 0xe8, 0xe6, + 0xb1, 0xc9, 0x7e, 0x03, 0x6e, 0xde, 0x62, 0xaa, 0x40, 0xaa, 0xeb, 0x7a, 0xc7, 0x10, 0x4c, 0xef, + 0x88, 0xe5, 0xf7, 0xba, 0xde, 0xa9, 0xfd, 0x8b, 0xde, 0x41, 0x50, 0x4f, 0x19, 0x97, 0xb6, 0x47, + 0xf4, 0xf7, 0x7d, 0x95, 0xe7, 0x67, 0x5b, 0x53, 0xe5, 0xf6, 0x54, 0xb6, 0xca, 0xcb, 0x6a, 0xb6, + 0x56, 0xd4, 0x5c, 0x53, 0x97, 0xce, 0x3f, 0xae, 0xcb, 0x52, 0x35, 0x41, 0xb9, 0x9a, 0x94, 0x9e, + 0xe6, 0x40, 0x0f, 0xd0, 0xb3, 0x28, 0xc0, 0x7f, 0xd4, 0x33, 0xca, 0xe5, 0x7c, 0x50, 0x95, 0xde, + 0x43, 0xf3, 0x2a, 0x5d, 0x66, 0xbf, 0x56, 0xc8, 0xfe, 0x36, 0x34, 0xd4, 0xbd, 0xcc, 0x28, 0x6c, + 0x60, 0xb3, 0xf0, 0x3b, 0xe0, 0x0e, 0x19, 0xa7, 0x98, 0xfe, 0x96, 0x51, 0x21, 0xfd, 0xaf, 0xa1, + 0x6d, 0x96, 0x22, 0x65, 0x89, 0x79, 0x89, 0x0d, 0xa9, 0x5a, 0x24, 0x35, 0xa1, 0xfe, 0x81, 0x45, + 0x53, 0xff, 0xaf, 0x1a, 0xb4, 0x26, 0x21, 0x4d, 0x08, 0x8f, 0x98, 0x8a, 0x99, 0x90, 0x85, 0x29, + 0x36, 0x07, 0xeb, 0x6f, 0x74, 0x04, 0x9d, 0x7c, 0x00, 0x1a, 0x7d, 0x6a, 0x9f, 0xeb, 0x04, 0xdc, + 0x0e, 0x8b, 0x6f, 0xc5, 0x97, 0xe0, 0x26, 0xd9, 0xc2, 0x8e, 0xc5, 0xfc, 0xe8, 0x90, 0x64, 0x0b, + 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0x79, 0x84, 0xfa, 0xe7, 0xb4, 0xc1, 0x6d, 0x51, 0x6c, 0x15, + 0x1b, 0xc1, 0xd8, 0xf2, 0xf9, 0xac, 0x22, 0x18, 0x8e, 0x50, 0xcf, 0xd5, 0x2d, 0xe1, 0x8b, 0x2c, + 0x0d, 0x04, 0x0d, 0x59, 0x32, 0x15, 0x5e, 0x53, 0x63, 0x3a, 0xc6, 0x3a, 0x31, 0x46, 0xf5, 0x83, + 0x73, 0x49, 0x93, 0x70, 0xae, 0xb4, 0x5c, 0x22, 0x4d, 0x65, 0xf7, 0x96, 0x8e, 0x1c, 0xfc, 0x1a, + 0x3c, 0x91, 0x92, 0xdb, 0x24, 0x88, 0x59, 0x48, 0xe2, 0xe0, 0x96, 0xf1, 0x6b, 0x7d, 0x83, 0x2c, + 0xc9, 0xab, 0xfc, 0x91, 0xf6, 0x9f, 0x2a, 0xf7, 0xaf, 0xda, 0x3b, 0x54, 0x4e, 0x7f, 0x00, 0x4e, + 0x9e, 0x70, 0x81, 0x5e, 0x81, 0x23, 0xf2, 0x85, 0x7e, 0x43, 0xdd, 0xfe, 0x17, 0x2b, 0xf7, 0xb6, + 0x6e, 0x7c, 0x0f, 0x3c, 0x78, 0x91, 0xcf, 0x28, 0xdd, 0xee, 0x5b, 0xe0, 0x4e, 0x3e, 0x9e, 0x0d, + 0x83, 0xe1, 0xe9, 0xc9, 
0xe8, 0xec, 0xa2, 0x57, 0x41, 0x3d, 0x68, 0x0f, 0x8a, 0x96, 0xea, 0xc1, + 0x49, 0xde, 0x04, 0x25, 0xc2, 0x64, 0x84, 0x3f, 0x8c, 0x70, 0x91, 0x60, 0x2d, 0x55, 0xe4, 0xc1, + 0xb6, 0xb1, 0xbc, 0x1d, 0x9d, 0x8d, 0xf0, 0xc9, 0xd2, 0x53, 0x3b, 0xf8, 0x0a, 0x36, 0xed, 0xbb, + 0x84, 0x1c, 0x68, 0xbc, 0x3f, 0x1b, 0xe0, 0x8f, 0xbd, 0x0a, 0xea, 0x80, 0x33, 0xb9, 0xc0, 0xa3, + 0xc1, 0xbb, 0x93, 0xb3, 0xb7, 0xbd, 0xea, 0x65, 0x53, 0xff, 0x12, 0x7f, 0xff, 0x77, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto index e0fe0ec728f7..9379ef49a2e3 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go index b3c8cff7542e..b34c5d59a3b4 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: messages.proto -// DO NOT EDIT! 
package grpc_testing @@ -80,6 +79,20 @@ func (m *Payload) String() string { return proto.CompactTextString(m) func (*Payload) ProtoMessage() {} func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +func (m *Payload) GetType() PayloadType { + if m != nil { + return m.Type + } + return PayloadType_COMPRESSABLE +} + +func (m *Payload) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + // A protobuf representation for grpc status. This is used by test // clients to specify a status that the server should attempt to return. type EchoStatus struct { @@ -92,6 +105,20 @@ func (m *EchoStatus) String() string { return proto.CompactTextString func (*EchoStatus) ProtoMessage() {} func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } +func (m *EchoStatus) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *EchoStatus) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. @@ -117,6 +144,20 @@ func (m *SimpleRequest) String() string { return proto.CompactTextStr func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } +func (m *SimpleRequest) GetResponseType() PayloadType { + if m != nil { + return m.ResponseType + } + return PayloadType_COMPRESSABLE +} + +func (m *SimpleRequest) GetResponseSize() int32 { + if m != nil { + return m.ResponseSize + } + return 0 +} + func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload @@ -124,6 +165,27 @@ func (m *SimpleRequest) GetPayload() *Payload { return nil } +func (m *SimpleRequest) GetFillUsername() bool { + if m != nil { + return m.FillUsername + } + return false +} + +func (m *SimpleRequest) GetFillOauthScope() bool { + if m != nil { + return m.FillOauthScope + } + return false +} + +func (m *SimpleRequest) GetResponseCompression() CompressionType { + if m != nil { + return m.ResponseCompression + } + return CompressionType_NONE +} + func (m *SimpleRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus @@ -154,6 +216,20 @@ func (m *SimpleResponse) GetPayload() *Payload { return nil } +func (m *SimpleResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *SimpleResponse) GetOauthScope() string { + if m != nil { + return m.OauthScope + } + return "" +} + // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. @@ -183,6 +259,13 @@ func (m *StreamingInputCallResponse) String() string { return proto.C func (*StreamingInputCallResponse) ProtoMessage() {} func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } +func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { + if m != nil { + return m.AggregatedPayloadSize + } + return 0 +} + // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. 
@@ -198,6 +281,20 @@ func (m *ResponseParameters) String() string { return proto.CompactTe func (*ResponseParameters) ProtoMessage() {} func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } +func (m *ResponseParameters) GetSize() int32 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *ResponseParameters) GetIntervalUs() int32 { + if m != nil { + return m.IntervalUs + } + return 0 +} + // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. @@ -220,6 +317,13 @@ func (m *StreamingOutputCallRequest) String() string { return proto.C func (*StreamingOutputCallRequest) ProtoMessage() {} func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } +func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { + if m != nil { + return m.ResponseType + } + return PayloadType_COMPRESSABLE +} + func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters @@ -234,6 +338,13 @@ func (m *StreamingOutputCallRequest) GetPayload() *Payload { return nil } +func (m *StreamingOutputCallRequest) GetResponseCompression() CompressionType { + if m != nil { + return m.ResponseCompression + } + return CompressionType_NONE +} + func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus @@ -270,12 +381,19 @@ func (m *ReconnectParams) String() string { return proto.CompactTextS func (*ReconnectParams) ProtoMessage() {} func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } +func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 { + if m != nil { + return m.MaxReconnectBackoffMs + } + return 0 +} + // For reconnect interop test only. // Server tells client whether its reconnects are following the spec and the // reconnect backoffs it saw. 
type ReconnectInfo struct { Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"` - BackoffMs []int32 `protobuf:"varint,2,rep,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` + BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` } func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } @@ -283,6 +401,20 @@ func (m *ReconnectInfo) String() string { return proto.CompactTextStr func (*ReconnectInfo) ProtoMessage() {} func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } +func (m *ReconnectInfo) GetPassed() bool { + if m != nil { + return m.Passed + } + return false +} + +func (m *ReconnectInfo) GetBackoffMs() []int32 { + if m != nil { + return m.BackoffMs + } + return nil +} + func init() { proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") @@ -302,46 +434,46 @@ func init() { func init() { proto.RegisterFile("messages.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 645 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0x25, 0xdf, 0xe9, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x51, 0x99, 0x4b, 0x55, 0x89, - 0x20, 0x15, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9b, 0x84, 0x75, 0x7b, 0xe1, 0x62, - 0x6d, 0x9c, 0x4d, 0x1a, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x28, 0x07, 0xee, 0xfc, 0x60, 0xee, 0xec, - 0xae, 0xbd, 0x8e, 0xd3, 0xf6, 0xd0, 0xc2, 0x85, 0xdb, 0xce, 0xcc, 0x9b, 0x97, 0x79, 0x33, 0xcf, - 0x0a, 0xb4, 0x7c, 0xca, 0x39, 0x99, 0x51, 0xde, 0x09, 0x23, 0x26, 0x18, 0x6a, 0xce, 0xa2, 0xd0, - 0xeb, 0x08, 0xca, 0xc5, 0x3c, 0x98, 0xd9, 0xa7, 0x50, 0x1b, 0x91, 0xab, 0x05, 0x23, 0x13, 0xf4, - 0x02, 0xca, 0xe2, 0x2a, 0xa4, 0x56, 0x61, 0xb7, 0xb0, 0xd7, 0x3a, 0xd8, 0xea, 0xe4, 0x71, 0x9d, - 0x14, 0x74, 0x2e, 0x01, 0x58, 0xc3, 0x10, 0x82, 0xf2, 0x98, 0x4d, 0xae, 0xac, 0xa2, 0x84, 0x37, - 0xb1, 0x7e, 0xdb, 0x6f, 0x01, 0x7a, 0xde, 0x25, 0x73, 0x04, 0x11, 0x31, 0x57, 0x08, 0x8f, 0x4d, - 0x12, 0xc2, 0x0a, 0xd6, 0x6f, 0x64, 0x41, 0x2d, 0x9d, 0x47, 0x37, 0xae, 0x61, 0x13, 0xda, 0xbf, - 0x4a, 0xb0, 0xee, 0xcc, 0xfd, 0x70, 0x41, 0x31, 0xfd, 0x1a, 0xcb, 0x9f, 0x45, 0xef, 0x60, 0x3d, - 0xa2, 0x3c, 0x64, 0x01, 0xa7, 0xee, 0xdd, 0x26, 0x6b, 0x1a, 0xbc, 0x8a, 0xd0, 0xf3, 0x5c, 0x3f, - 0x9f, 0xff, 0x48, 0x7e, 0xb1, 0xb2, 0x04, 0x39, 0x32, 0x87, 0x5e, 0x42, 0x2d, 0x4c, 0x18, 0xac, - 0x92, 0x2c, 0x37, 0x0e, 0x36, 0x6f, 0xa5, 0xc7, 0x06, 0xa5, 0x58, 0xa7, 0xf3, 0xc5, 0xc2, 0x8d, - 0x39, 0x8d, 0x02, 0xe2, 0x53, 0xab, 0x2c, 0xdb, 0xea, 0xb8, 0xa9, 0x92, 0x17, 0x69, 0x0e, 0xed, - 0x41, 0x5b, 0x83, 0x18, 0x89, 0xc5, 0xa5, 0xcb, 0x3d, 0x26, 0xa7, 0xaf, 0x68, 0x5c, 0x4b, 0xe5, - 0x87, 0x2a, 0xed, 0xa8, 0x2c, 0x1a, 0xc1, 0xa3, 0x6c, 0x48, 0x8f, 0xf9, 0xa1, 0x0c, 0xf8, 0x9c, - 0x05, 0x56, 0x55, 0x6b, 0xdd, 0x59, 0x1d, 0xe6, 0x68, 0x09, 0xd0, 0x7a, 0x1f, 0x9a, 0xd6, 0x5c, - 0x01, 0x75, 0x61, 0x63, 0x29, 0x5b, 0x5f, 0xc2, 0xaa, 0x69, 0x65, 0xd6, 0x2a, 0xd9, 0xf2, 0x52, - 0xb8, 0x95, 0xad, 0x44, 0xc7, 0xf6, 0x4f, 0x68, 0x99, 0x53, 0x24, 0xf9, 0xfc, 0x9a, 0x0a, 0x77, - 0x5a, 0xd3, 0x36, 0xd4, 0xb3, 0x0d, 0x25, 0x97, 0xce, 0x62, 0xf4, 0x0c, 0x1a, 0xf9, 0xc5, 0x94, - 0x74, 0x19, 0x58, 0xb6, 0x14, 0xe9, 0xca, 0x2d, 0x47, 0x44, 0x94, 0xf8, 0x92, 0xba, 0x1f, 0x84, - 0xb1, 0x38, 0x22, 0x8b, 0x85, 0xb1, 0xc5, 0x7d, 0x47, 0xb1, 0xcf, 0x61, 0xfb, 0x36, 0xb6, 0x54, - 0xd9, 0x6b, 0x78, 0x42, 0x66, 0xb3, 0x88, 0xce, 0x88, 0xa0, 0x13, 
0x37, 0xed, 0x49, 0xfc, 0x92, - 0x18, 0x77, 0x73, 0x59, 0x4e, 0xa9, 0x95, 0x71, 0xec, 0x3e, 0x20, 0xc3, 0x31, 0x22, 0x91, 0x94, - 0x25, 0x68, 0xa4, 0x3d, 0x9f, 0x6b, 0xd5, 0x6f, 0x25, 0x77, 0x1e, 0xc8, 0xea, 0x37, 0xa2, 0x5c, - 0x93, 0xba, 0x10, 0x4c, 0xea, 0x82, 0xdb, 0xbf, 0x8b, 0xb9, 0x09, 0x87, 0xb1, 0xb8, 0x26, 0xf8, - 0x5f, 0xbf, 0x83, 0x4f, 0x90, 0xf9, 0x44, 0xea, 0x33, 0xa3, 0xca, 0x39, 0x4a, 0x72, 0x79, 0xbb, - 0xab, 0x2c, 0x37, 0x25, 0x61, 0x14, 0xdd, 0x94, 0x79, 0xef, 0xaf, 0xe6, 0xbf, 0xb4, 0xf9, 0x00, - 0x9e, 0xde, 0xba, 0xf6, 0xbf, 0xf4, 0xbc, 0xfd, 0x11, 0x36, 0x30, 0xf5, 0x58, 0x10, 0x50, 0x4f, - 0xe8, 0x65, 0x71, 0xf4, 0x06, 0x2c, 0x9f, 0x7c, 0x77, 0x23, 0x93, 0x76, 0xc7, 0xc4, 0xfb, 0xc2, - 0xa6, 0x53, 0xd7, 0xe7, 0xc6, 0x5e, 0xb2, 0x9e, 0x75, 0x1d, 0x26, 0xd5, 0x33, 0x6e, 0x9f, 0xc0, - 0x7a, 0x96, 0xed, 0x07, 0x53, 0x86, 0x1e, 0x43, 0x35, 0x24, 0x9c, 0xd3, 0x64, 0x98, 0x3a, 0x4e, - 0x23, 0xb4, 0x03, 0x90, 0xe3, 0x54, 0x47, 0xad, 0xe0, 0xb5, 0xb1, 0xe1, 0xd9, 0x7f, 0x0f, 0x8d, - 0x9c, 0x33, 0x50, 0x1b, 0x9a, 0x47, 0xc3, 0xb3, 0x11, 0xee, 0x39, 0x4e, 0xf7, 0xf0, 0xb4, 0xd7, - 0x7e, 0x20, 0x1d, 0xdb, 0xba, 0x18, 0xac, 0xe4, 0x0a, 0x08, 0xa0, 0x8a, 0xbb, 0x83, 0xe3, 0xe1, - 0x59, 0xbb, 0xb8, 0x7f, 0x00, 0x1b, 0xd7, 0xee, 0x81, 0xea, 0x50, 0x1e, 0x0c, 0x07, 0xaa, 0x59, - 0xbe, 0x3e, 0x7c, 0xee, 0x8f, 0x64, 0x4b, 0x03, 0x6a, 0xc7, 0xbd, 0x93, 0xd3, 0xee, 0x79, 0xaf, - 0x5d, 0x1c, 0x57, 0xf5, 0x5f, 0xcd, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, - 0x1e, 0x7c, 0x06, 0x00, 0x00, + // 652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89, + 0x20, 0x05, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9a, 0x84, 0x75, 0x7b, 0xe1, 0x62, + 0x6d, 0x9c, 0x8d, 0x6b, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x9a, 0x1e, 0xb8, 0xf3, 0x83, 0xb9, 0xa3, + 0x5d, 0x7f, 0xc4, 0x69, 0x7b, 0x68, 0xe1, 0xc2, 0x6d, 0xf7, 0xed, 0x9b, 0x97, 0x79, 0x33, 0xcf, + 0x0a, 0x34, 0x3d, 0xca, 0x39, 0x71, 0x28, 0xef, 0x06, 0x21, 0x13, 0x0c, 0x35, 0x9c, 0x30, 0xb0, + 0xbb, 0x82, 0x72, 0xe1, 0xfa, 0x8e, 0x31, 0x82, 0xea, 0x94, 0xac, 0x96, 0x8c, 0xcc, 0xd1, 0x2b, + 0x28, 0x89, 0x55, 0x40, 0x75, 0xad, 0xad, 0x75, 0x9a, 0xbd, 0xbd, 0x6e, 0x9e, 0xd7, 0x4d, 0x48, + 0xe7, 0xab, 0x80, 0x62, 0x45, 0x43, 0x08, 0x4a, 0x33, 0x36, 0x5f, 0xe9, 0x85, 0xb6, 0xd6, 0x69, + 0x60, 0x75, 0x36, 0xde, 0x03, 0x0c, 0xec, 0x4b, 0x66, 0x0a, 0x22, 0x22, 0x2e, 0x19, 0x36, 0x9b, + 0xc7, 0x82, 0x65, 0xac, 0xce, 0x48, 0x87, 0x6a, 0xd2, 0x8f, 0x2a, 0xdc, 0xc2, 0xe9, 0xd5, 0xf8, + 0x55, 0x84, 0x6d, 0xd3, 0xf5, 0x82, 0x25, 0xc5, 0xf4, 0x7b, 0x44, 0xb9, 0x40, 0x1f, 0x60, 0x3b, + 0xa4, 0x3c, 0x60, 0x3e, 0xa7, 0xd6, 0xfd, 0x3a, 0x6b, 0xa4, 0x7c, 0x79, 0x43, 0x2f, 0x73, 0xf5, + 0xdc, 0xbd, 0x8e, 0x7f, 0xb1, 0xbc, 0x26, 0x99, 0xee, 0x35, 0x45, 0xaf, 0xa1, 0x1a, 0xc4, 0x0a, + 0x7a, 0xb1, 0xad, 0x75, 0xea, 0xbd, 0xdd, 0x3b, 0xe5, 0x71, 0xca, 0x92, 0xaa, 0x0b, 0x77, 0xb9, + 0xb4, 0x22, 0x4e, 0x43, 0x9f, 0x78, 0x54, 0x2f, 0xb5, 0xb5, 0x4e, 0x0d, 0x37, 0x24, 0x78, 0x91, + 0x60, 0xa8, 0x03, 0x2d, 0x45, 0x62, 0x24, 0x12, 0x97, 0x16, 0xb7, 0x59, 0x40, 0xf5, 0xb2, 0xe2, + 0x35, 0x25, 0x3e, 0x91, 0xb0, 0x29, 0x51, 0x34, 0x85, 0x27, 0x59, 0x93, 0x36, 0xf3, 0x82, 0x90, + 0x72, 0xee, 0x32, 0x5f, 0xaf, 0x28, 0xaf, 0x07, 0x9b, 0xcd, 0x1c, 0xaf, 0x09, 0xca, 0xef, 0xe3, + 0xb4, 0x34, 0xf7, 0x80, 0xfa, 0xb0, 0xb3, 0xb6, 0xad, 0x36, 0xa1, 0x57, 0x95, 0x33, 0x7d, 0x53, + 0x6c, 0xbd, 
0x29, 0xdc, 0xcc, 0x46, 0xa2, 0xee, 0xc6, 0x4f, 0x68, 0xa6, 0xab, 0x88, 0xf1, 0xfc, + 0x98, 0xb4, 0x7b, 0x8d, 0x69, 0x1f, 0x6a, 0xd9, 0x84, 0xe2, 0x4d, 0x67, 0x77, 0xf4, 0x02, 0xea, + 0xf9, 0xc1, 0x14, 0xd5, 0x33, 0xb0, 0x6c, 0x28, 0xc6, 0x08, 0xf6, 0x4c, 0x11, 0x52, 0xe2, 0xb9, + 0xbe, 0x33, 0xf4, 0x83, 0x48, 0x1c, 0x93, 0xe5, 0x32, 0x8d, 0xc5, 0x43, 0x5b, 0x31, 0xce, 0x61, + 0xff, 0x2e, 0xb5, 0xc4, 0xd9, 0x5b, 0x78, 0x46, 0x1c, 0x27, 0xa4, 0x0e, 0x11, 0x74, 0x6e, 0x25, + 0x35, 0x71, 0x5e, 0xe2, 0xe0, 0xee, 0xae, 0x9f, 0x13, 0x69, 0x19, 0x1c, 0x63, 0x08, 0x28, 0xd5, + 0x98, 0x92, 0x90, 0x78, 0x54, 0xd0, 0x50, 0x65, 0x3e, 0x57, 0xaa, 0xce, 0xd2, 0xae, 0xeb, 0x0b, + 0x1a, 0xfe, 0x20, 0x32, 0x35, 0x49, 0x0a, 0x21, 0x85, 0x2e, 0xb8, 0xf1, 0xbb, 0x90, 0xeb, 0x70, + 0x12, 0x89, 0x1b, 0x86, 0xff, 0xf5, 0x3b, 0xf8, 0x02, 0x59, 0x4e, 0xac, 0x20, 0x6b, 0x55, 0x2f, + 0xb4, 0x8b, 0x9d, 0x7a, 0xaf, 0xbd, 0xa9, 0x72, 0xdb, 0x12, 0x46, 0xe1, 0x6d, 0x9b, 0x0f, 0xfe, + 0x6a, 0xfe, 0xcb, 0x98, 0x8f, 0xe1, 0xf9, 0x9d, 0x63, 0xff, 0xcb, 0xcc, 0x1b, 0x9f, 0x61, 0x07, + 0x53, 0x9b, 0xf9, 0x3e, 0xb5, 0x85, 0x1a, 0x16, 0x47, 0xef, 0x40, 0xf7, 0xc8, 0x95, 0x15, 0xa6, + 0xb0, 0x35, 0x23, 0xf6, 0x37, 0xb6, 0x58, 0x58, 0x1e, 0x4f, 0xe3, 0xe5, 0x91, 0xab, 0xac, 0xea, + 0x28, 0x7e, 0x3d, 0xe3, 0xc6, 0x29, 0x6c, 0x67, 0xe8, 0xd0, 0x5f, 0x30, 0xf4, 0x14, 0x2a, 0x01, + 0xe1, 0x9c, 0xc6, 0xcd, 0xd4, 0x70, 0x72, 0x43, 0x07, 0x00, 0x39, 0x4d, 0xb9, 0xd4, 0x32, 0xde, + 0x9a, 0xa5, 0x3a, 0x87, 0x1f, 0xa1, 0x9e, 0x4b, 0x06, 0x6a, 0x41, 0xe3, 0x78, 0x72, 0x36, 0xc5, + 0x03, 0xd3, 0xec, 0x1f, 0x8d, 0x06, 0xad, 0x47, 0x08, 0x41, 0xf3, 0x62, 0xbc, 0x81, 0x69, 0x08, + 0xa0, 0x82, 0xfb, 0xe3, 0x93, 0xc9, 0x59, 0xab, 0x70, 0xd8, 0x83, 0x9d, 0x1b, 0xfb, 0x40, 0x35, + 0x28, 0x8d, 0x27, 0x63, 0x59, 0x5c, 0x83, 0xd2, 0xa7, 0xaf, 0xc3, 0x69, 0x4b, 0x43, 0x75, 0xa8, + 0x9e, 0x0c, 0x4e, 0x47, 0xfd, 0xf3, 0x41, 0xab, 0x30, 0xab, 0xa8, 0xbf, 0x9a, 0x37, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, 0x1e, 0x7c, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto index b1abc9e81870..bd83f095fb15 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
+// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Message definitions to be used by integration test service definitions. diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go index ffc357d899f6..d70d1f7452fd 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: payloads.proto -// DO NOT EDIT! package grpc_testing @@ -23,6 +22,20 @@ func (m *ByteBufferParams) String() string { return proto.CompactText func (*ByteBufferParams) ProtoMessage() {} func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } +func (m *ByteBufferParams) GetReqSize() int32 { + if m != nil { + return m.ReqSize + } + return 0 +} + +func (m *ByteBufferParams) GetRespSize() int32 { + if m != nil { + return m.RespSize + } + return 0 +} + type SimpleProtoParams struct { ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` @@ -33,6 +46,20 @@ func (m *SimpleProtoParams) String() string { return proto.CompactTex func (*SimpleProtoParams) ProtoMessage() {} func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } +func (m *SimpleProtoParams) GetReqSize() int32 { + if m != nil { + return m.ReqSize + } + return 0 +} + +func (m *SimpleProtoParams) GetRespSize() int32 { + if m != nil { + return m.RespSize + } + return 0 +} + type ComplexProtoParams struct { } @@ -203,21 +230,21 @@ func init() { func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) } var fileDescriptor2 = []byte{ - // 250 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 
0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, - 0x07, 0xf2, 0x83, 0x81, 0x5c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xd4, 0xe2, 0x02, 0x88, 0x1c, 0x13, - 0x58, 0x8e, 0x03, 0x24, 0x00, 0x92, 0x54, 0xf2, 0xe6, 0x12, 0x0c, 0xce, 0xcc, 0x2d, 0xc8, 0x49, - 0x0d, 0x00, 0x59, 0x44, 0xa1, 0x61, 0x22, 0x5c, 0x42, 0xce, 0xf9, 0x20, 0xc3, 0x2a, 0x90, 0x4c, - 0x53, 0xfa, 0xc6, 0xc8, 0xc5, 0x1b, 0x00, 0xf1, 0x8f, 0x73, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x90, - 0x3b, 0x17, 0x5f, 0x12, 0xd0, 0x03, 0x49, 0xa5, 0x69, 0xf1, 0x05, 0x60, 0x35, 0x60, 0x5b, 0xb8, - 0x8d, 0xe4, 0xf4, 0x90, 0xfd, 0xa9, 0x87, 0xee, 0x49, 0x0f, 0x86, 0x20, 0x5e, 0xa8, 0x3e, 0xa8, - 0x43, 0xdd, 0xb8, 0x78, 0x8b, 0xc1, 0xae, 0x87, 0x99, 0xc3, 0x04, 0x36, 0x47, 0x1e, 0xd5, 0x1c, - 0x0c, 0x0f, 0x02, 0x0d, 0xe2, 0x81, 0xe8, 0x83, 0x9a, 0xe3, 0xc9, 0xc5, 0x97, 0x0c, 0x71, 0x38, - 0xcc, 0x20, 0x66, 0xb0, 0x41, 0x0a, 0xa8, 0x06, 0x61, 0x7a, 0x0e, 0xe4, 0x24, 0xa8, 0x4e, 0x88, - 0x80, 0x13, 0x27, 0x17, 0x3b, 0x34, 0xf2, 0x92, 0xd8, 0xc0, 0x91, 0x67, 0x0c, 0x08, 0x00, 0x00, - 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, + 0x2f, 0x4a, 0x2d, 0x0c, 0xce, 0xac, 0x4a, 0x15, 0x92, 0xe6, 0xe2, 0x2c, 0x4a, 0x2d, 0x2e, 0x80, + 0xc8, 0x31, 0x81, 0xe5, 0x38, 0x40, 0x02, 0x20, 0x49, 0x25, 0x6f, 0x2e, 0xc1, 0xe0, 0xcc, 0xdc, + 0x82, 0x9c, 0xd4, 0x00, 0x90, 0x45, 0x14, 0x1a, 0x26, 0xc2, 0x25, 0xe4, 0x9c, 0x0f, 0x32, 0xac, + 0x02, 0xc9, 0x34, 0xa5, 0x6f, 0x8c, 0x5c, 0xbc, 0x01, 0x10, 0xff, 0x38, 0xe7, 0xe7, 0xa5, 0x65, + 0xa6, 0x0b, 0xb9, 0x73, 0xf1, 0x25, 0x55, 0x96, 0xa4, 0x26, 0x95, 0xa6, 0xc5, 0x17, 0x80, 0xd5, + 0x80, 0x6d, 0xe1, 0x36, 0x92, 0xd3, 0x43, 0xf6, 0xa7, 0x1e, 0xba, 0x27, 0x3d, 0x18, 0x82, 0x78, + 0xa1, 0xfa, 0xa0, 0x0e, 0x75, 0xe3, 0xe2, 0x2d, 0x06, 0xbb, 0x1e, 0x66, 0x0e, 0x13, 0xd8, 0x1c, + 0x79, 0x54, 0x73, 0x30, 0x3c, 0xe8, 0xc1, 0x10, 0xc4, 0x03, 0xd1, 0x07, 0x35, 0xc7, 0x93, 0x8b, + 0x2f, 0x19, 0xe2, 0x70, 0x98, 0x41, 0xcc, 0x60, 0x83, 0x14, 0x50, 0x0d, 0xc2, 0xf4, 0x1c, 0xc8, + 0x49, 0x50, 0x9d, 0x10, 0x01, 0x27, 0x4e, 0x2e, 0x76, 0x68, 0xe4, 0x25, 0xb1, 0x81, 0x23, 0xcf, + 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto index 056fe0c72ed9..5d4871f5f9fd 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go index 2aae3179bc45..50e3505957e0 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: services.proto -// DO NOT EDIT! 
package grpc_testing @@ -423,21 +422,21 @@ var _WorkerService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("services.proto", fileDescriptor3) } var fileDescriptor3 = []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, + // 255 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, 0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17, - 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0x4d, 0xea, 0xcc, 0x44, 0xf0, - 0x49, 0x7c, 0x07, 0x9f, 0xd2, 0xee, 0x66, 0x0b, 0xb5, 0xe4, 0xb6, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, - 0x7f, 0x46, 0x2c, 0x08, 0xf0, 0xcb, 0x34, 0x40, 0x65, 0x8f, 0x9e, 0xbd, 0x3c, 0x69, 0xb1, 0x6f, - 0x4a, 0x06, 0x62, 0xe3, 0x5a, 0xb5, 0xe8, 0x80, 0x48, 0xb7, 0x23, 0x55, 0x45, 0xe3, 0x1d, 0xa3, - 0xb7, 0x71, 0x5c, 0xfe, 0x66, 0xe2, 0x74, 0x05, 0xae, 0xf9, 0xe8, 0x34, 0x6e, 0xea, 0x28, 0x92, - 0x0f, 0x22, 0x7f, 0x76, 0x1a, 0xbf, 0x2b, 0x6d, 0xad, 0xbc, 0x28, 0xa7, 0xbe, 0xb2, 0x36, 0x5d, - 0x6f, 0x61, 0x0d, 0x9f, 0x61, 0x08, 0xd4, 0x65, 0x1a, 0x52, 0xef, 0x1d, 0x81, 0x7c, 0x14, 0x45, - 0xcd, 0x08, 0xba, 0x1b, 0xd8, 0x81, 0xae, 0xab, 0xec, 0x3a, 0x5b, 0xfe, 0x1c, 0x89, 0xe2, 0xd5, - 0xe3, 0x06, 0x70, 0x7c, 0xe9, 0xbd, 0xc8, 0xd7, 0xc1, 0x6d, 0x27, 0x40, 0x79, 0x36, 0x13, 0xec, - 0xd2, 0x3b, 0x6c, 0x49, 0xa9, 0x14, 0xa9, 0x59, 0x73, 0xa0, 0xad, 0x78, 0xaf, 0xa9, 0xac, 0x01, - 0xc7, 0x73, 0x4d, 0x4c, 0x53, 0x9a, 0x48, 0x26, 0x9a, 0x95, 0xc8, 0x2b, 0x8f, 0x50, 0xf9, 0x30, - 0x68, 0xce, 0x67, 0xcb, 0x03, 0x18, 0x9b, 0xaa, 0x14, 0xda, 0xff, 0xd9, 0xad, 0x10, 0x4f, 0xc1, - 0x70, 0xac, 0x29, 0xe5, 0xff, 0xcd, 0x17, 0x6f, 0xde, 0x55, 0x22, 0x7b, 0x3b, 0xde, 0x5d, 0xf3, - 0xe6, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, + 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0xcd, 0xd4, 0x99, 0x89, 0xe0, + 0x93, 0xf8, 0x0e, 0x3e, 0xa5, 0xec, 0x66, 0x57, 0xd6, 0x92, 0x9b, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, + 0x7f, 0x46, 0xcd, 0x18, 0xe8, 0xc3, 0x75, 0xc0, 0xf5, 0x48, 0x28, 0xa8, 0x8f, 0x7a, 0x1a, 0xbb, + 0x5a, 0x80, 0xc5, 0x85, 0xde, 0xcc, 0x06, 0x60, 0xb6, 0xfd, 0x8e, 0x9a, 0xaa, 0xc3, 0x20, 0x84, + 0x3e, 0x8d, 0xf3, 0xef, 0x42, 0x1d, 0x2f, 0x20, 0x74, 0x6f, 0x83, 0xa5, 0x55, 0x9b, 0x44, 0xfa, + 0x4e, 0x95, 0x8f, 0xc1, 0xd2, 0x67, 0x63, 0xbd, 0xd7, 0x67, 0xf5, 0xbe, 0xaf, 0x6e, 0xdd, 0x30, + 0x7a, 0x58, 0xc2, 0x7b, 0x04, 0x16, 0x73, 0x9e, 0x87, 0x3c, 0x62, 0x60, 0xd0, 0xf7, 0xaa, 0x6a, + 0x85, 0xc0, 0x0e, 0x2e, 0xf4, 0xff, 0x74, 0x5d, 0x14, 0x97, 0xc5, 0xfc, 0xeb, 0x40, 0x55, 0xcf, + 0x48, 0x2b, 0xa0, 0xdd, 0x4b, 0x6f, 0x55, 0xb9, 0x8c, 0x61, 0x3d, 0x01, 0xe9, 0x93, 0x89, 0x60, + 0x93, 0xde, 0x50, 0xcf, 0xc6, 0xe4, 0x48, 0x2b, 0x56, 0x22, 0xaf, 0xc5, 0x5b, 0x4d, 0xe3, 0x1d, + 0x04, 0x99, 0x6a, 0x52, 0x9a, 0xd3, 0x24, 0xb2, 0xa7, 0x59, 0xa8, 0xb2, 0x41, 0x82, 0x06, 0x63, + 0x10, 0x7d, 0x3a, 0x59, 0x46, 0xfa, 0x6d, 0x6a, 0x72, 0x68, 0xfb, 0x67, 0xd7, 0x4a, 0x3d, 0x44, + 0x27, 0xa9, 0xa6, 0xd6, 0x7f, 0x37, 0x9f, 0xd0, 0xbd, 0x9a, 0x4c, 0xf6, 0x72, 0xb8, 0xb9, 0xe6, + 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto index c2acca7f1a3a..f4e7907824b0 100644 --- 
a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go index 45b9b4af6908..d69cb7410ee4 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: stats.proto -// DO NOT EDIT! 
package grpc_testing @@ -28,6 +27,27 @@ func (m *ServerStats) String() string { return proto.CompactTextStrin func (*ServerStats) ProtoMessage() {} func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } +func (m *ServerStats) GetTimeElapsed() float64 { + if m != nil { + return m.TimeElapsed + } + return 0 +} + +func (m *ServerStats) GetTimeUser() float64 { + if m != nil { + return m.TimeUser + } + return 0 +} + +func (m *ServerStats) GetTimeSystem() float64 { + if m != nil { + return m.TimeSystem + } + return 0 +} + // Histogram params based on grpc/support/histogram.c type HistogramParams struct { Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"` @@ -39,9 +59,23 @@ func (m *HistogramParams) String() string { return proto.CompactTextS func (*HistogramParams) ProtoMessage() {} func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } +func (m *HistogramParams) GetResolution() float64 { + if m != nil { + return m.Resolution + } + return 0 +} + +func (m *HistogramParams) GetMaxPossible() float64 { + if m != nil { + return m.MaxPossible + } + return 0 +} + // Histogram data based on grpc/support/histogram.c type HistogramData struct { - Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket" json:"bucket,omitempty"` MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"` MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"` Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` @@ -54,6 +88,48 @@ func (m *HistogramData) String() string { return proto.CompactTextStr func (*HistogramData) ProtoMessage() {} func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } +func (m *HistogramData) GetBucket() []uint32 { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *HistogramData) GetMinSeen() float64 { + if m != nil { + return m.MinSeen + } + return 0 +} + +func (m *HistogramData) GetMaxSeen() float64 { + if m != nil { + return m.MaxSeen + } + return 0 +} + +func (m *HistogramData) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *HistogramData) GetSumOfSquares() float64 { + if m != nil { + return m.SumOfSquares + } + return 0 +} + +func (m *HistogramData) GetCount() float64 { + if m != nil { + return m.Count + } + return 0 +} + type ClientStats struct { // Latency histogram. Data points are in nanoseconds. 
Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` @@ -75,6 +151,27 @@ func (m *ClientStats) GetLatencies() *HistogramData { return nil } +func (m *ClientStats) GetTimeElapsed() float64 { + if m != nil { + return m.TimeElapsed + } + return 0 +} + +func (m *ClientStats) GetTimeUser() float64 { + if m != nil { + return m.TimeUser + } + return 0 +} + +func (m *ClientStats) GetTimeSystem() float64 { + if m != nil { + return m.TimeSystem + } + return 0 +} + func init() { proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") @@ -85,27 +182,27 @@ func init() { func init() { proto.RegisterFile("stats.proto", fileDescriptor4) } var fileDescriptor4 = []byte{ - // 342 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x4f, 0xe3, 0x30, - 0x10, 0xc5, 0x95, 0xa6, 0xed, 0xb6, 0x93, 0x76, 0x77, 0x65, 0xad, 0x56, 0x41, 0x95, 0xf8, 0x13, - 0x71, 0xe8, 0x29, 0x07, 0x38, 0x71, 0x06, 0x24, 0x6e, 0x54, 0x0d, 0x9c, 0x23, 0x37, 0x4c, 0x2b, - 0x8b, 0xc4, 0x0e, 0x99, 0x09, 0x2a, 0x1f, 0x09, 0xf1, 0x25, 0x71, 0x9c, 0x08, 0x0a, 0x48, 0x70, - 0x49, 0xf2, 0x7e, 0x6f, 0x34, 0xe3, 0xc9, 0x33, 0x04, 0xc4, 0x92, 0x29, 0x2e, 0x2b, 0xc3, 0x46, - 0x4c, 0x36, 0x55, 0x99, 0xc5, 0x8c, 0xc4, 0x4a, 0x6f, 0x22, 0x0d, 0x41, 0x82, 0xd5, 0x23, 0x56, - 0x49, 0x53, 0x22, 0x8e, 0x60, 0xc2, 0xaa, 0xc0, 0x14, 0x73, 0x59, 0x12, 0xde, 0x85, 0xde, 0xa1, - 0x37, 0xf7, 0x96, 0x41, 0xc3, 0x2e, 0x5b, 0x24, 0x66, 0x30, 0x76, 0x25, 0x35, 0x61, 0x15, 0xf6, - 0x9c, 0x3f, 0x6a, 0xc0, 0xad, 0xd5, 0xe2, 0x00, 0x5c, 0x6d, 0x4a, 0x4f, 0xc4, 0x58, 0x84, 0xbe, - 0xb3, 0xa1, 0x41, 0x89, 0x23, 0xd1, 0x0d, 0xfc, 0xb9, 0x52, 0xc4, 0x66, 0x53, 0xc9, 0x62, 0x21, - 0xed, 0x83, 0xc4, 0x3e, 0x40, 0x85, 0x64, 0xf2, 0x9a, 0x95, 0xd1, 0xdd, 0xc4, 0x1d, 0xd2, 0x9c, - 0xa9, 0x90, 0xdb, 0xb4, 0x34, 0x44, 0x6a, 0x95, 0x63, 0x37, 0x33, 0xb0, 0x6c, 0xd1, 0xa1, 0xe8, - 0xc5, 0x83, 0xe9, 0x5b, 0xdb, 0x0b, 0xc9, 0x52, 0xfc, 0x87, 0xe1, 0xaa, 0xce, 0xee, 0x91, 0x6d, - 0x43, 0x7f, 0x3e, 0x5d, 0x76, 0x4a, 0xec, 0xc1, 0xa8, 0x50, 0x3a, 0x25, 0x44, 0xdd, 0x35, 0xfa, - 0x65, 0x75, 0x62, 0xa5, 0xb3, 0xec, 0x1c, 0x67, 0xf9, 0x9d, 0x25, 0xb7, 0xce, 0xfa, 0x0b, 0x3e, - 0xd5, 0x45, 0xd8, 0x77, 0xb4, 0xf9, 0x14, 0xc7, 0xf0, 0xdb, 0xbe, 0x52, 0xb3, 0x4e, 0xe9, 0xa1, - 0x96, 0xf6, 0xb4, 0xe1, 0xc0, 0x99, 0x13, 0x4b, 0xaf, 0xd7, 0x49, 0xcb, 0xc4, 0x3f, 0x18, 0x64, - 0xa6, 0xd6, 0x1c, 0x0e, 0x9d, 0xd9, 0x8a, 0xe8, 0xd9, 0x83, 0xe0, 0x3c, 0x57, 0xa8, 0xb9, 0xfd, - 0xe9, 0x67, 0x30, 0xce, 0x25, 0xa3, 0xce, 0x94, 0x6d, 0xd3, 0xec, 0x1f, 0x9c, 0xcc, 0xe2, 0xdd, - 0x94, 0xe2, 0x0f, 0xbb, 0x2d, 0xdf, 0xab, 0xbf, 0xe4, 0xd5, 0xfb, 0x21, 0x2f, 0xff, 0xfb, 0xbc, - 0xfa, 0x9f, 0xf3, 0x5a, 0x0d, 0xdd, 0xa5, 0x39, 0x7d, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, - 0x34, 0x90, 0x43, 0x02, 0x00, 0x00, + // 341 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c, + 0x74, 0x95, 0x85, 0xae, 0x5c, 0xab, 0xe0, 0xce, 0xd2, 0xe8, 0x3a, 0x4c, 0xe3, 0x69, 0x19, 0xcc, + 0xcc, 0xc4, 0x39, 0x33, 0x12, 0x1f, 0x49, 0x7c, 0x49, 0xc9, 0x24, 0x68, 0x55, 0xd0, 0x5d, 0xe6, + 0xfb, 0x7e, 0xe6, 0xe4, 0xe4, 0x0f, 0x44, 0x64, 0xb9, 0xa5, 0xb4, 0x32, 0xda, 0x6a, 0x36, 0xd9, + 0x9a, 0xaa, 0x48, 0x2d, 0x92, 0x15, 0x6a, 0x9b, 0x28, 0x88, 0x32, 0x34, 0x4f, 
0x68, 0xb2, 0x26, + 0xc2, 0x8e, 0x61, 0x62, 0x85, 0xc4, 0x1c, 0x4b, 0x5e, 0x11, 0xde, 0xc7, 0xc1, 0x3c, 0x58, 0x04, + 0xab, 0xa8, 0x61, 0x57, 0x2d, 0x62, 0x33, 0x18, 0xfb, 0x88, 0x23, 0x34, 0x71, 0xcf, 0xfb, 0x51, + 0x03, 0xee, 0x08, 0x0d, 0x3b, 0x02, 0x9f, 0xcd, 0xe9, 0x99, 0x2c, 0xca, 0x38, 0xf4, 0x1a, 0x1a, + 0x94, 0x79, 0x92, 0xdc, 0xc2, 0xbf, 0x6b, 0x41, 0x56, 0x6f, 0x0d, 0x97, 0x4b, 0x6e, 0xb8, 0x24, + 0x76, 0x08, 0x60, 0x90, 0x74, 0xe9, 0xac, 0xd0, 0xaa, 0x9b, 0xb8, 0x43, 0x9a, 0x77, 0x92, 0xbc, + 0xce, 0x2b, 0x4d, 0x24, 0xd6, 0x25, 0x76, 0x33, 0x23, 0xc9, 0xeb, 0x65, 0x87, 0x92, 0xd7, 0x00, + 0xa6, 0xef, 0xd7, 0x5e, 0x72, 0xcb, 0xd9, 0x3e, 0x0c, 0xd7, 0xae, 0x78, 0x40, 0x1b, 0x07, 0xf3, + 0x70, 0x31, 0x5d, 0x75, 0x27, 0x76, 0x00, 0x23, 0x29, 0x54, 0x4e, 0x88, 0xaa, 0xbb, 0xe8, 0x8f, + 0x14, 0x2a, 0x43, 0x54, 0x5e, 0xf1, 0xba, 0x55, 0x61, 0xa7, 0x78, 0xed, 0xd5, 0x7f, 0x08, 0xc9, + 0xc9, 0xb8, 0xef, 0x69, 0xf3, 0xc8, 0x4e, 0xe0, 0x2f, 0x39, 0x99, 0xeb, 0x4d, 0x4e, 0x8f, 0x8e, + 0x1b, 0xa4, 0x78, 0xe0, 0xe5, 0x84, 0x9c, 0xbc, 0xd9, 0x64, 0x2d, 0x63, 0x7b, 0x30, 0x28, 0xb4, + 0x53, 0x36, 0x1e, 0x7a, 0xd9, 0x1e, 0x92, 0x97, 0x00, 0xa2, 0x8b, 0x52, 0xa0, 0xb2, 0xed, 0x47, + 0x3f, 0x87, 0x71, 0xc9, 0x2d, 0xaa, 0x42, 0x20, 0xf9, 0xfd, 0xa3, 0xd3, 0x59, 0xba, 0xdb, 0x52, + 0xfa, 0x69, 0xb7, 0xd5, 0x47, 0xfa, 0x5b, 0x5f, 0xbd, 0x5f, 0xfa, 0x0a, 0x7f, 0xee, 0xab, 0xff, + 0xb5, 0xaf, 0xf5, 0xd0, 0xff, 0x34, 0x67, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, 0x34, + 0x90, 0x43, 0x02, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto index 9bc3cb216ba5..baf3610f3351 100644 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto +++ b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; diff --git a/vendor/google.golang.org/grpc/benchmark/latency/latency.go b/vendor/google.golang.org/grpc/benchmark/latency/latency.go new file mode 100644 index 000000000000..5839a5c4429a --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/latency/latency.go @@ -0,0 +1,316 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package latency provides wrappers for net.Conn, net.Listener, and +// net.Dialers, designed to interoperate to inject real-world latency into +// network connections. +package latency + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "time" + + "golang.org/x/net/context" +) + +// Dialer is a function matching the signature of net.Dial. +type Dialer func(network, address string) (net.Conn, error) + +// TimeoutDialer is a function matching the signature of net.DialTimeout. +type TimeoutDialer func(network, address string, timeout time.Duration) (net.Conn, error) + +// ContextDialer is a function matching the signature of +// net.Dialer.DialContext. +type ContextDialer func(ctx context.Context, network, address string) (net.Conn, error) + +// Network represents a network with the given bandwidth, latency, and MTU +// (Maximum Transmission Unit) configuration, and can produce wrappers of +// net.Listeners, net.Conn, and various forms of dialing functions. The +// Listeners and Dialers/Conns on both sides of connections must come from this +// package, but need not be created from the same Network. Latency is computed +// when sending (in Write), and is injected when receiving (in Read). This +// allows senders' Write calls to be non-blocking, as in real-world +// applications. +// +// Note: Latency is injected by the sender specifying the absolute time data +// should be available, and the reader delaying until that time arrives to +// provide the data. This package attempts to counter-act the effects of clock +// drift and existing network latency by measuring the delay between the +// sender's transmission time and the receiver's reception time during startup. 
+// No attempt is made to measure the existing bandwidth of the connection. +type Network struct { + Kbps int // Kilobits per second; if non-positive, infinite + Latency time.Duration // One-way latency (sending); if non-positive, no delay + MTU int // Bytes per packet; if non-positive, infinite +} + +var ( + //Local simulates local network. + Local = Network{0, 0, 0} + //LAN simulates local area network network. + LAN = Network{100 * 1024, 2 * time.Millisecond, 1500} + //WAN simulates wide area network. + WAN = Network{20 * 1024, 30 * time.Millisecond, 1500} + //Longhaul simulates bad network. + Longhaul = Network{1000 * 1024, 200 * time.Millisecond, 9000} +) + +// Conn returns a net.Conn that wraps c and injects n's latency into that +// connection. This function also imposes latency for connection creation. +// If n's Latency is lower than the measured latency in c, an error is +// returned. +func (n *Network) Conn(c net.Conn) (net.Conn, error) { + start := now() + nc := &conn{Conn: c, network: n, readBuf: new(bytes.Buffer)} + if err := nc.sync(); err != nil { + return nil, err + } + sleep(start.Add(nc.delay).Sub(now())) + return nc, nil +} + +type conn struct { + net.Conn + network *Network + + readBuf *bytes.Buffer // one packet worth of data received + lastSendEnd time.Time // time the previous Write should be fully on the wire + delay time.Duration // desired latency - measured latency +} + +// header is sent before all data transmitted by the application. +type header struct { + ReadTime int64 // Time the reader is allowed to read this packet (UnixNano) + Sz int32 // Size of the data in the packet +} + +func (c *conn) Write(p []byte) (n int, err error) { + tNow := now() + if c.lastSendEnd.Before(tNow) { + c.lastSendEnd = tNow + } + for len(p) > 0 { + pkt := p + if c.network.MTU > 0 && len(pkt) > c.network.MTU { + pkt = pkt[:c.network.MTU] + p = p[c.network.MTU:] + } else { + p = nil + } + if c.network.Kbps > 0 { + if congestion := c.lastSendEnd.Sub(tNow) - c.delay; congestion > 0 { + // The network is full; sleep until this packet can be sent. + sleep(congestion) + tNow = tNow.Add(congestion) + } + } + c.lastSendEnd = c.lastSendEnd.Add(c.network.pktTime(len(pkt))) + hdr := header{ReadTime: c.lastSendEnd.Add(c.delay).UnixNano(), Sz: int32(len(pkt))} + if err := binary.Write(c.Conn, binary.BigEndian, hdr); err != nil { + return n, err + } + x, err := c.Conn.Write(pkt) + n += x + if err != nil { + return n, err + } + } + return n, nil +} + +func (c *conn) Read(p []byte) (n int, err error) { + if c.readBuf.Len() == 0 { + var hdr header + if err := binary.Read(c.Conn, binary.BigEndian, &hdr); err != nil { + return 0, err + } + defer func() { sleep(time.Unix(0, hdr.ReadTime).Sub(now())) }() + + if _, err := io.CopyN(c.readBuf, c.Conn, int64(hdr.Sz)); err != nil { + return 0, err + } + } + // Read from readBuf. + return c.readBuf.Read(p) +} + +// sync does a handshake and then measures the latency on the network in +// coordination with the other side. +func (c *conn) sync() error { + const ( + pingMsg = "syncPing" + warmup = 10 // minimum number of iterations to measure latency + giveUp = 50 // maximum number of iterations to measure latency + accuracy = time.Millisecond // req'd accuracy to stop early + goodRun = 3 // stop early if latency within accuracy this many times + ) + + type syncMsg struct { + SendT int64 // Time sent. If zero, stop. + RecvT int64 // Time received. If zero, fill in and respond. 
+ } + + // A trivial handshake + if err := binary.Write(c.Conn, binary.BigEndian, []byte(pingMsg)); err != nil { + return err + } + var ping [8]byte + if err := binary.Read(c.Conn, binary.BigEndian, &ping); err != nil { + return err + } else if string(ping[:]) != pingMsg { + return fmt.Errorf("malformed handshake message: %v (want %q)", ping, pingMsg) + } + + // Both sides are alive and syncing. Calculate network delay / clock skew. + att := 0 + good := 0 + var latency time.Duration + localDone, remoteDone := false, false + send := true + for !localDone || !remoteDone { + if send { + if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{SendT: now().UnixNano()}); err != nil { + return err + } + att++ + send = false + } + + // Block until we get a syncMsg + m := syncMsg{} + if err := binary.Read(c.Conn, binary.BigEndian, &m); err != nil { + return err + } + + if m.RecvT == 0 { + // Message initiated from other side. + if m.SendT == 0 { + remoteDone = true + continue + } + // Send response. + m.RecvT = now().UnixNano() + if err := binary.Write(c.Conn, binary.BigEndian, m); err != nil { + return err + } + continue + } + + lag := time.Duration(m.RecvT - m.SendT) + latency += lag + avgLatency := latency / time.Duration(att) + if e := lag - avgLatency; e > -accuracy && e < accuracy { + good++ + } else { + good = 0 + } + if att < giveUp && (att < warmup || good < goodRun) { + send = true + continue + } + localDone = true + latency = avgLatency + // Tell the other side we're done. + if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{}); err != nil { + return err + } + } + if c.network.Latency <= 0 { + return nil + } + c.delay = c.network.Latency - latency + if c.delay < 0 { + return fmt.Errorf("measured network latency (%v) higher than desired latency (%v)", latency, c.network.Latency) + } + return nil +} + +// Listener returns a net.Listener that wraps l and injects n's latency in its +// connections. +func (n *Network) Listener(l net.Listener) net.Listener { + return &listener{Listener: l, network: n} +} + +type listener struct { + net.Listener + network *Network +} + +func (l *listener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + return l.network.Conn(c) +} + +// Dialer returns a Dialer that wraps d and injects n's latency in its +// connections. n's Latency is also injected to the connection's creation. +func (n *Network) Dialer(d Dialer) Dialer { + return func(network, address string) (net.Conn, error) { + conn, err := d(network, address) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// TimeoutDialer returns a TimeoutDialer that wraps d and injects n's latency +// in its connections. n's Latency is also injected to the connection's +// creation. +func (n *Network) TimeoutDialer(d TimeoutDialer) TimeoutDialer { + return func(network, address string, timeout time.Duration) (net.Conn, error) { + conn, err := d(network, address, timeout) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// ContextDialer returns a ContextDialer that wraps d and injects n's latency +// in its connections. n's Latency is also injected to the connection's +// creation. 
+func (n *Network) ContextDialer(d ContextDialer) ContextDialer { + return func(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := d(ctx, network, address) + if err != nil { + return nil, err + } + return n.Conn(conn) + } +} + +// pktTime returns the time it takes to transmit one packet of data of size b +// in bytes. +func (n *Network) pktTime(b int) time.Duration { + if n.Kbps <= 0 { + return time.Duration(0) + } + return time.Duration(b) * time.Second / time.Duration(n.Kbps*(1024/8)) +} + +// Wrappers for testing + +var now = time.Now +var sleep = time.Sleep diff --git a/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go b/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go new file mode 100644 index 000000000000..ab2625bf1af0 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/latency/latency_test.go @@ -0,0 +1,353 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package latency + +import ( + "bytes" + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" +) + +// bufConn is a net.Conn implemented by a bytes.Buffer (which is a ReadWriter). +type bufConn struct { + *bytes.Buffer +} + +func (bufConn) Close() error { panic("unimplemented") } +func (bufConn) LocalAddr() net.Addr { panic("unimplemented") } +func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") } +func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") } +func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") } +func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") } + +func restoreHooks() func() { + s := sleep + n := now + return func() { + sleep = s + now = n + } +} + +func TestConn(t *testing.T) { + defer restoreHooks()() + + // Constant time. + now = func() time.Time { return time.Unix(123, 456) } + + // Capture sleep times for checking later. + var sleepTimes []time.Duration + sleep = func(t time.Duration) { sleepTimes = append(sleepTimes, t) } + + wantSleeps := func(want ...time.Duration) { + if !reflect.DeepEqual(want, sleepTimes) { + t.Fatalf("sleepTimes = %v; want %v", sleepTimes, want) + } + sleepTimes = nil + } + + // Use a fairly high latency to cause a large BDP and avoid sleeps while + // writing due to simulation of full buffers. + latency := 1 * time.Second + c, err := (&Network{Kbps: 1, Latency: latency, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + wantSleeps(latency) // Connection creation delay. + + // 1 kbps = 128 Bps. Divides evenly by 1 second using nanos. 
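Stepping back from the test for a moment: in application code the Network type defined above is used by wrapping both ends of a connection, since the handshake in sync() needs a peer that also goes through this package. A rough usage sketch follows; the address, message, and goroutine structure are illustrative assumptions, not taken from the patch.

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc/benchmark/latency"
)

func main() {
	// Roughly the latency.WAN profile: 20 Mbit/s, 30 ms one-way latency, 1500-byte MTU.
	n := &latency.Network{Kbps: 20 * 1024, Latency: 30 * time.Millisecond, MTU: 1500}

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	lis = n.Listener(lis) // accepted connections carry the simulated bandwidth/latency

	go func() {
		c, err := lis.Accept() // completes the latency handshake with the dialer below
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("hello across the simulated WAN"))
	}()

	// The dialing side must be wrapped as well.
	dial := n.ContextDialer((&net.Dialer{}).DialContext)
	conn, err := dial(context.Background(), "tcp", lis.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	nr, _ := conn.Read(buf) // delivered only after the injected delay has elapsed
	log.Printf("read %q", buf[:nr])
}
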
+ byteLatency := time.Duration(time.Second / 128) + + write := func(b []byte) { + n, err := c.Write(b) + if n != len(b) || err != nil { + t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) + } + } + + write([]byte{1, 2, 3, 4, 5}) // One full packet + pkt1Time := latency + byteLatency*5 + write([]byte{6}) // One partial packet + pkt2Time := pkt1Time + byteLatency + write([]byte{7, 8, 9, 10, 11, 12, 13}) // Two packets + pkt3Time := pkt2Time + byteLatency*5 + pkt4Time := pkt3Time + byteLatency*2 + + // No reads, so no sleeps yet. + wantSleeps() + + read := func(n int, want []byte) { + b := make([]byte, n) + if rd, err := c.Read(b); err != nil || rd != len(want) { + t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil", n, rd, err, len(want)) + } + if !reflect.DeepEqual(b[:len(want)], want) { + t.Fatalf("read %v; want %v", b, want) + } + } + + read(1, []byte{1}) + wantSleeps(pkt1Time) + read(1, []byte{2}) + wantSleeps() + read(3, []byte{3, 4, 5}) + wantSleeps() + read(2, []byte{6}) + wantSleeps(pkt2Time) + read(2, []byte{7, 8}) + wantSleeps(pkt3Time) + read(10, []byte{9, 10, 11}) + wantSleeps() + read(10, []byte{12, 13}) + wantSleeps(pkt4Time) +} + +func TestSync(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + sleep = func(d time.Duration) { tn = tn.Add(d) } + + // Simulate a 20ms latency network, then run sync across that and expect to + // measure 20ms latency, or 10ms additional delay for a 30ms network. + slowConn, err := (&Network{Kbps: 0, Latency: 20 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + c, err := (&Network{Latency: 30 * time.Millisecond}).Conn(slowConn) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + if c.(*conn).delay != 10*time.Millisecond { + t.Fatalf("c.delay = %v; want 10ms", c.(*conn).delay) + } +} + +func TestSyncTooSlow(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + sleep = func(d time.Duration) { tn = tn.Add(d) } + + // Simulate a 10ms latency network, then attempt to simulate a 5ms latency + // network and expect an error. + slowConn, err := (&Network{Kbps: 0, Latency: 10 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + + errWant := "measured network latency (10ms) higher than desired latency (5ms)" + if _, err := (&Network{Latency: 5 * time.Millisecond}).Conn(slowConn); err == nil || err.Error() != errWant { + t.Fatalf("Conn() = _, %q; want _, %q", err, errWant) + } +} + +func TestListenerAndDialer(t *testing.T) { + defer restoreHooks()() + + tn := time.Unix(123, 0) + startTime := tn + mu := &sync.Mutex{} + now = func() time.Time { + mu.Lock() + defer mu.Unlock() + return tn + } + + // Use a fairly high latency to cause a large BDP and avoid sleeps while + // writing due to simulation of full buffers. + n := &Network{Kbps: 2, Latency: 1 * time.Second, MTU: 10} + // 2 kbps = .25 kBps = 256 Bps + byteLatency := func(n int) time.Duration { + return time.Duration(n) * time.Second / 256 + } + + // Create a real listener and wrap it. 
+ l, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Unexpected error creating listener: %v", err) + } + defer l.Close() + l = n.Listener(l) + + var serverConn net.Conn + var scErr error + scDone := make(chan struct{}) + go func() { + serverConn, scErr = l.Accept() + close(scDone) + }() + + // Create a dialer and use it. + clientConn, err := n.TimeoutDialer(net.DialTimeout)("tcp", l.Addr().String(), 2*time.Second) + if err != nil { + t.Fatalf("Unexpected error dialing: %v", err) + } + defer clientConn.Close() + + // Block until server's Conn is available. + <-scDone + if scErr != nil { + t.Fatalf("Unexpected error listening: %v", scErr) + } + defer serverConn.Close() + + // sleep (only) advances tn. Done after connections established so sync detects zero delay. + sleep = func(d time.Duration) { + mu.Lock() + defer mu.Unlock() + if d > 0 { + tn = tn.Add(d) + } + } + + seq := func(a, b int) []byte { + buf := make([]byte, b-a) + for i := 0; i < b-a; i++ { + buf[i] = byte(i + a) + } + return buf + } + + pkt1 := seq(0, 10) + pkt2 := seq(10, 30) + pkt3 := seq(30, 35) + + write := func(c net.Conn, b []byte) { + n, err := c.Write(b) + if n != len(b) || err != nil { + t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) + } + } + + write(serverConn, pkt1) + write(serverConn, pkt2) + write(serverConn, pkt3) + write(clientConn, pkt3) + write(clientConn, pkt1) + write(clientConn, pkt2) + + if tn != startTime { + t.Fatalf("unexpected sleep in write; tn = %v; want %v", tn, startTime) + } + + read := func(c net.Conn, n int, want []byte, timeWant time.Time) { + b := make([]byte, n) + if rd, err := c.Read(b); err != nil || rd != len(want) { + t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil (read: %v)", n, rd, err, len(want), b[:rd]) + } + if !reflect.DeepEqual(b[:len(want)], want) { + t.Fatalf("read %v; want %v", b, want) + } + if !tn.Equal(timeWant) { + t.Errorf("tn after read(%v) = %v; want %v", want, tn, timeWant) + } + } + + read(clientConn, len(pkt1)+1, pkt1, startTime.Add(n.Latency+byteLatency(len(pkt1)))) + read(serverConn, len(pkt3)+1, pkt3, tn) // tn was advanced by the above read; pkt3 is shorter than pkt1 + + read(clientConn, len(pkt2), pkt2[:10], startTime.Add(n.Latency+byteLatency(len(pkt1)+10))) + read(clientConn, len(pkt2), pkt2[10:], startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)))) + read(clientConn, len(pkt3), pkt3, startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)+len(pkt3)))) + + read(serverConn, len(pkt1), pkt1, tn) // tn already past the arrival time due to prior reads + read(serverConn, len(pkt2), pkt2[:10], tn) + read(serverConn, len(pkt2), pkt2[10:], tn) + + // Sleep awhile and make sure the read happens disregarding previous writes + // (lastSendEnd handling). + sleep(10 * time.Second) + write(clientConn, pkt1) + read(serverConn, len(pkt1), pkt1, tn.Add(n.Latency+byteLatency(len(pkt1)))) + + // Send, sleep longer than the network delay, then make sure the read happens + // instantly. + write(serverConn, pkt1) + sleep(10 * time.Second) + read(clientConn, len(pkt1), pkt1, tn) +} + +func TestBufferBloat(t *testing.T) { + defer restoreHooks()() + + // Infinitely fast CPU: time doesn't pass unless sleep is called. + tn := time.Unix(123, 0) + now = func() time.Time { return tn } + // Capture sleep times for checking later. 
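Before the assertions that follow, it may help to restate the bandwidth arithmetic TestBufferBloat leans on. This is a standalone sketch that mirrors Network.pktTime from latency.go above; it is not the vendored code itself.

package main

import (
	"fmt"
	"time"
)

// pktTime mirrors Network.pktTime: the wire time of b bytes at a rate of
// kbps kilobits per second (1 kbps = 128 bytes per second).
func pktTime(kbps, b int) time.Duration {
	if kbps <= 0 {
		return 0
	}
	return time.Duration(b) * time.Second / time.Duration(kbps*(1024/8))
}

func main() {
	// TestBufferBloat uses Kbps: 8 (1 KiB/s), Latency: 1s, MTU: 8.
	kbps, lat, mtu := 8, time.Second, 8
	bdp := (kbps * 1024 / 8) * int(lat/time.Second) // bandwidth-delay product
	fmt.Println(bdp)                                // 1024 bytes may sit "on the wire"
	fmt.Println(pktTime(kbps, mtu))                 // 7.8125ms charged per full 8-byte packet
}
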
+ var sleepTimes []time.Duration + sleep = func(d time.Duration) { + sleepTimes = append(sleepTimes, d) + tn = tn.Add(d) + } + + wantSleeps := func(want ...time.Duration) error { + if !reflect.DeepEqual(want, sleepTimes) { + return fmt.Errorf("sleepTimes = %v; want %v", sleepTimes, want) + } + sleepTimes = nil + return nil + } + + n := &Network{Kbps: 8 /* 1KBps */, Latency: time.Second, MTU: 8} + bdpBytes := (n.Kbps * 1024 / 8) * int(n.Latency/time.Second) // 1024 + c, err := n.Conn(bufConn{&bytes.Buffer{}}) + if err != nil { + t.Fatalf("Unexpected error creating connection: %v", err) + } + wantSleeps(n.Latency) // Connection creation delay. + + write := func(n int, sleeps ...time.Duration) { + if wt, err := c.Write(make([]byte, n)); err != nil || wt != n { + t.Fatalf("c.Write(<%v bytes>) = %v, %v; want %v, nil", n, wt, err, n) + } + if err := wantSleeps(sleeps...); err != nil { + t.Fatalf("After writing %v bytes: %v", n, err) + } + } + + read := func(n int, sleeps ...time.Duration) { + if rd, err := c.Read(make([]byte, n)); err != nil || rd != n { + t.Fatalf("c.Read(_) = %v, %v; want %v, nil", rd, err, n) + } + if err := wantSleeps(sleeps...); err != nil { + t.Fatalf("After reading %v bytes: %v", n, err) + } + } + + write(8) // No reads and buffer not full, so no sleeps yet. + read(8, time.Second+n.pktTime(8)) + + write(bdpBytes) // Fill the buffer. + write(1) // We can send one extra packet even when the buffer is full. + write(n.MTU, n.pktTime(1)) // Make sure we sleep to clear the previous write. + write(1, n.pktTime(n.MTU)) + write(n.MTU+1, n.pktTime(1), n.pktTime(n.MTU)) + + tn = tn.Add(10 * time.Second) // Wait long enough for the buffer to clear. + write(bdpBytes) // No sleeps required. +} diff --git a/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go b/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go new file mode 100644 index 000000000000..59355590d587 --- /dev/null +++ b/vendor/google.golang.org/grpc/benchmark/primitives/primitives_test.go @@ -0,0 +1,235 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package primitives_test contains benchmarks for various synchronization primitives +// available in Go. 
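These microbenchmarks are written for the regular go test -bench machinery, but the same comparisons can also be driven programmatically. Below is a hedged sketch using the standard library's testing.Benchmark; everything in it is illustrative and not part of the patch.

package main

import (
	"fmt"
	"sync"
	"testing"
)

func main() {
	// testing.Benchmark chooses b.N automatically and reports ns/op.
	res := testing.Benchmark(func(b *testing.B) {
		var mu sync.Mutex
		x := 0
		for i := 0; i < b.N; i++ {
			mu.Lock()
			x++
			mu.Unlock()
		}
		_ = x
	})
	fmt.Println(res) // e.g. "50000000        25.3 ns/op" (numbers are machine-dependent)
}
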
+package primitives_test + +import ( + "sync" + "sync/atomic" + "testing" +) + +func BenchmarkSelectClosed(b *testing.B) { + c := make(chan struct{}) + close(c) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + select { + case <-c: + x++ + default: + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkSelectOpen(b *testing.B) { + c := make(chan struct{}) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + select { + case <-c: + default: + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkAtomicBool(b *testing.B) { + c := int32(0) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if atomic.LoadInt32(&c) == 0 { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkAtomicValue(b *testing.B) { + c := atomic.Value{} + c.Store(0) + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if c.Load().(int) == 0 { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutex(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.Lock() + x++ + c.Unlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkRWMutex(b *testing.B) { + c := sync.RWMutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.RLock() + x++ + c.RUnlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkRWMutexW(b *testing.B) { + c := sync.RWMutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.Lock() + x++ + c.Unlock() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutexWithDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + defer c.Unlock() + x++ + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutexWithClosureDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + defer func() { c.Unlock() }() + x++ + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkMutexWithoutDefer(b *testing.B) { + c := sync.Mutex{} + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + func() { + c.Lock() + x++ + c.Unlock() + }() + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +type myFooer struct{} + +func (myFooer) Foo() {} + +type fooer interface { + Foo() +} + +func BenchmarkInterfaceTypeAssertion(b *testing.B) { + // Call a separate function to avoid compiler optimizations. + runInterfaceTypeAssertion(b, myFooer{}) +} + +func runInterfaceTypeAssertion(b *testing.B, fer interface{}) { + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, ok := fer.(fooer); ok { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} + +func BenchmarkStructTypeAssertion(b *testing.B) { + // Call a separate function to avoid compiler optimizations. 
+ runStructTypeAssertion(b, myFooer{}) +} + +func runStructTypeAssertion(b *testing.B, fer interface{}) { + x := 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, ok := fer.(myFooer); ok { + x++ + } + } + b.StopTimer() + if x != b.N { + b.Fatal("error") + } +} diff --git a/vendor/google.golang.org/grpc/benchmark/server/main.go b/vendor/google.golang.org/grpc/benchmark/server/main.go index d43aad0bac08..dcce130e478d 100644 --- a/vendor/google.golang.org/grpc/benchmark/server/main.go +++ b/vendor/google.golang.org/grpc/benchmark/server/main.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package main import ( diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem b/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/benchmark/server/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 
-XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem b/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/benchmark/server/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go index 918beadc5036..f038d26ed0aa 100644 --- a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go +++ b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package stats import ( @@ -86,8 +104,14 @@ func NewHistogram(opts HistogramOptions) *Histogram { // Print writes textual output of the histogram values. func (h *Histogram) Print(w io.Writer) { + h.PrintWithUnit(w, 1) +} + +// PrintWithUnit writes textual output of the histogram values . +// Data in histogram is divided by a Unit before print. 
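As a usage note for the new method below: PrintWithUnit lets a caller that recorded nanosecond values render the same histogram in a coarser unit. A hedged sketch follows; the HistogramOptions field names are taken from this vendored package and the option values are arbitrary.

package main

import (
	"os"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	h := stats.NewHistogram(stats.HistogramOptions{
		NumBuckets:     32,
		GrowthFactor:   1, // each bucket twice as wide as the previous one
		BaseBucketSize: 1,
		MinValue:       0,
	})
	for _, d := range []time.Duration{3 * time.Millisecond, 7 * time.Millisecond, 12 * time.Millisecond} {
		h.Add(int64(d)) // durations are recorded as raw nanosecond counts
	}
	h.Print(os.Stdout)                                    // bucket bounds printed in nanoseconds
	h.PrintWithUnit(os.Stdout, float64(time.Millisecond)) // same data, bounds divided by 1e6
}
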
+func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { avg := float64(h.Sum) / float64(h.Count) - fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", h.Count, h.Min, h.Max, avg) + fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit) fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) if h.Count <= 0 { return @@ -103,9 +127,9 @@ func (h *Histogram) Print(w io.Writer) { accCount := int64(0) for i, b := range h.Buckets { - fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound) + fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit) if i+1 < len(h.Buckets) { - fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound) + fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) } else { fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") } diff --git a/vendor/google.golang.org/grpc/benchmark/stats/stats.go b/vendor/google.golang.org/grpc/benchmark/stats/stats.go index e0edb174072e..412daead052f 100644 --- a/vendor/google.golang.org/grpc/benchmark/stats/stats.go +++ b/vendor/google.golang.org/grpc/benchmark/stats/stats.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + package stats import ( @@ -5,9 +23,132 @@ import ( "fmt" "io" "math" + "sort" + "strconv" "time" ) +// Features contains most fields for a benchmark +type Features struct { + NetworkMode string + EnableTrace bool + Latency time.Duration + Kbps int + Mtu int + MaxConcurrentCalls int + ReqSizeBytes int + RespSizeBytes int + EnableCompressor bool +} + +// String returns the textual output of the Features as string. +func (f Features) String() string { + return fmt.Sprintf("traceMode_%t-latency_%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+ + "%#v-reqSize_%#vB-respSize_%#vB-Compressor_%t", f.EnableTrace, + f.Latency.String(), f.Kbps, f.Mtu, f.MaxConcurrentCalls, f.ReqSizeBytes, f.RespSizeBytes, f.EnableCompressor) +} + +// PartialPrintString can print certain features with different format. 
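To make the run-naming scheme concrete: Features.String above flattens one benchmark configuration into a single identifier. A hedged illustration with arbitrary values (only fields shown in this hunk are set).

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	f := stats.Features{
		EnableTrace:        false,
		Latency:            time.Millisecond,
		Kbps:               0,
		Mtu:                0,
		MaxConcurrentCalls: 1,
		ReqSizeBytes:       1,
		RespSizeBytes:      1,
		EnableCompressor:   false,
	}
	// Prints:
	// traceMode_false-latency_1ms-kbps_0-MTU_0-maxConcurrentCalls_1-reqSize_1B-respSize_1B-Compressor_false
	fmt.Println(f.String())
}
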
+func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string { + s := "" + var ( + prefix, suffix, linker string + isNetwork bool + ) + if shared { + suffix = "\n" + linker = ": " + } else { + prefix = "-" + linker = "_" + } + if noneEmptyPos[0] { + s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableCompressor, suffix) + } + if shared && f.NetworkMode != "" { + s += fmt.Sprintf("Network: %s \n", f.NetworkMode) + isNetwork = true + } + if !isNetwork { + if noneEmptyPos[1] { + s += fmt.Sprintf("%slatency%s%s%s", prefix, linker, f.Latency.String(), suffix) + } + if noneEmptyPos[2] { + s += fmt.Sprintf("%skbps%s%#v%s", prefix, linker, f.Kbps, suffix) + } + if noneEmptyPos[3] { + s += fmt.Sprintf("%sMTU%s%#v%s", prefix, linker, f.Mtu, suffix) + } + } + if noneEmptyPos[4] { + s += fmt.Sprintf("%sCallers%s%#v%s", prefix, linker, f.MaxConcurrentCalls, suffix) + } + if noneEmptyPos[5] { + s += fmt.Sprintf("%sreqSize%s%#vB%s", prefix, linker, f.ReqSizeBytes, suffix) + } + if noneEmptyPos[6] { + s += fmt.Sprintf("%srespSize%s%#vB%s", prefix, linker, f.RespSizeBytes, suffix) + } + if noneEmptyPos[7] { + s += fmt.Sprintf("%sCompressor%s%t%s", prefix, linker, f.EnableCompressor, suffix) + } + return s +} + +type percentLatency struct { + Percent int + Value time.Duration +} + +// BenchResults records features and result of a benchmark. +type BenchResults struct { + RunMode string + Features Features + Latency []percentLatency + Operations int + NsPerOp int64 + AllocedBytesPerOp int64 + AllocsPerOp int64 + SharedPosion []bool +} + +// SetBenchmarkResult sets features of benchmark and basic results. +func (stats *Stats) SetBenchmarkResult(mode string, features Features, o int, allocdBytes, allocs int64, sharedPos []bool) { + stats.result.RunMode = mode + stats.result.Features = features + stats.result.Operations = o + stats.result.AllocedBytesPerOp = allocdBytes + stats.result.AllocsPerOp = allocs + stats.result.SharedPosion = sharedPos +} + +// GetBenchmarkResults returns the result of the benchmark including features and result. +func (stats *Stats) GetBenchmarkResults() BenchResults { + return stats.result +} + +// BenchString output latency stats as the format as time + unit. +func (stats *Stats) BenchString() string { + stats.maybeUpdate() + s := stats.result + res := s.RunMode + "-" + s.Features.String() + ": \n" + if len(s.Latency) != 0 { + var statsUnit = s.Latency[0].Value + var timeUnit = fmt.Sprintf("%v", statsUnit)[1:] + for i := 1; i < len(s.Latency)-1; i++ { + res += fmt.Sprintf("%d_Latency: %s %s \t", s.Latency[i].Percent, + strconv.FormatFloat(float64(s.Latency[i].Value)/float64(statsUnit), 'f', 4, 64), timeUnit) + } + res += fmt.Sprintf("Avg latency: %s %s \t", + strconv.FormatFloat(float64(s.Latency[len(s.Latency)-1].Value)/float64(statsUnit), 'f', 4, 64), timeUnit) + } + res += fmt.Sprintf("Count: %v \t", s.Operations) + res += fmt.Sprintf("%v Bytes/op\t", s.AllocedBytesPerOp) + res += fmt.Sprintf("%v Allocs/op\t", s.AllocsPerOp) + + return res +} + // Stats is a simple helper for gathering additional statistics like histogram // during benchmarks. This is not thread safe. 
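The BenchResults/BenchString plumbing above hangs off the existing Stats type. A hedged sketch of the flow; it assumes the package's NewStats constructor and Add method, which this file defines outside the hunks shown here, and the recorded values are made up.

package main

import (
	"fmt"
	"os"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	s := stats.NewStats(16) // assumed constructor; the bucket count is arbitrary
	s.SortLatency()         // enables the 50th/90th percentile extraction in maybeUpdate
	for i := 1; i <= 1000; i++ {
		s.Add(time.Duration(i) * time.Microsecond) // pretend per-call latencies
	}
	s.SetBenchmarkResult("unary", stats.Features{MaxConcurrentCalls: 1, ReqSizeBytes: 1, RespSizeBytes: 1},
		1000, 0, 0, make([]bool, 8))
	fmt.Println(s.BenchString()) // one line: percentiles, average, count, allocation figures
	s.Print(os.Stdout)           // histogram view of the same data
}
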
type Stats struct { @@ -18,6 +159,9 @@ type Stats struct { durations durationSlice dirty bool + + sortLatency bool + result BenchResults } type durationSlice []time.Duration @@ -46,6 +190,18 @@ func (stats *Stats) Clear() { stats.durations = stats.durations[:0] stats.histogram = nil stats.dirty = false + stats.result = BenchResults{} +} + +//Sort method for durations +func (a durationSlice) Len() int { return len(a) } +func (a durationSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a durationSlice) Less(i, j int) bool { return a[i] < a[j] } +func max(a, b int64) int64 { + if a > b { + return a + } + return b } // maybeUpdate updates internal stat data if there was any newly added @@ -55,6 +211,12 @@ func (stats *Stats) maybeUpdate() { return } + if stats.sortLatency { + sort.Sort(stats.durations) + stats.min = int64(stats.durations[0]) + stats.max = int64(stats.durations[len(stats.durations)-1]) + } + stats.min = math.MaxInt64 stats.max = 0 for _, d := range stats.durations { @@ -75,9 +237,6 @@ func (stats *Stats) maybeUpdate() { stats.unit = u } - // Adjust the min/max according to the new unit. - stats.min /= int64(stats.unit) - stats.max /= int64(stats.unit) numBuckets := stats.numBuckets if n := int(stats.max - stats.min + 1); n < numBuckets { numBuckets = n @@ -90,21 +249,37 @@ func (stats *Stats) maybeUpdate() { MinValue: stats.min}) for _, d := range stats.durations { - stats.histogram.Add(int64(d / stats.unit)) + stats.histogram.Add(int64(d)) } stats.dirty = false + + if stats.durations.Len() != 0 { + var percentToObserve = []int{50, 90} + // First data record min unit from the latency result. + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: stats.unit}) + for _, position := range percentToObserve { + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: position, Value: stats.durations[max(stats.histogram.Count*int64(position)/100-1, 0)]}) + } + // Last data record the average latency. + avg := float64(stats.histogram.Sum) / float64(stats.histogram.Count) + stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: time.Duration(avg)}) + } +} + +// SortLatency blocks the output +func (stats *Stats) SortLatency() { + stats.sortLatency = true } // Print writes textual output of the Stats. func (stats *Stats) Print(w io.Writer) { stats.maybeUpdate() - if stats.histogram == nil { fmt.Fprint(w, "Histogram (empty)\n") } else { fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) - stats.histogram.Print(w) + stats.histogram.PrintWithUnit(w, float64(stats.unit)) } } diff --git a/vendor/google.golang.org/grpc/benchmark/stats/util.go b/vendor/google.golang.org/grpc/benchmark/stats/util.go index a9922f985f3d..f3bb3a364705 100644 --- a/vendor/google.golang.org/grpc/benchmark/stats/util.go +++ b/vendor/google.golang.org/grpc/benchmark/stats/util.go @@ -1,3 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + package stats import ( @@ -50,7 +68,7 @@ func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { } p := strings.Split(runtime.FuncForPC(pc).Name(), ".") benchName = p[len(p)-1] - if strings.HasPrefix(benchName, "Benchmark") { + if strings.HasPrefix(benchName, "run") { break } } @@ -148,9 +166,8 @@ func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { func injectStatsIfFinished(line string) { injectCond.L.Lock() defer injectCond.L.Unlock() - - // We assume that the benchmark results start with the benchmark name. - if curB == nil || !strings.HasPrefix(line, curBenchName) { + // We assume that the benchmark results start with "Benchmark". + if curB == nil || !strings.HasPrefix(line, "Benchmark") { return } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go index dfe8c8f0fefa..9db1d8504a80 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go @@ -1,39 +1,25 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ package main import ( + "flag" "math" "runtime" "sync" @@ -48,10 +34,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" ) var ( - caFile = "benchmark/server/testdata/ca.pem" + caFile = flag.String("ca_file", "", "The file containing the CA root cert file") ) type lockingHistogram struct { @@ -137,7 +124,10 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error // Check and set security options. if config.SecurityParams != nil { - creds, err := credentials.NewClientTLSFromFile(abs(caFile), config.SecurityParams.ServerHostOverride) + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride) if err != nil { return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go index 0d20581df435..238dfdebcaf5 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go @@ -1,39 +1,25 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ package main import ( + "flag" "runtime" "strconv" "strings" @@ -47,12 +33,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" ) var ( - // File path related to google.golang.org/grpc. - certFile = "benchmark/server/testdata/server1.pem" - keyFile = "benchmark/server/testdata/server1.key" + certFile = flag.String("tls_cert_file", "", "The TLS cert file") + keyFile = flag.String("tls_key_file", "", "The TLS key file") ) type benchmarkServer struct { @@ -105,7 +91,13 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma // Set security options. if config.SecurityParams != nil { - creds, err := credentials.NewServerTLSFromFile(abs(certFile), abs(keyFile)) + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } + creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("failed to generate credentials %v", err) } diff --git a/vendor/google.golang.org/grpc/benchmark/worker/main.go b/vendor/google.golang.org/grpc/benchmark/worker/main.go index 8a80406202fe..2b1ba985bd78 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/main.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/main.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
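// Editorial sketch (not part of the patch): the worker changes above replace
// hard-coded GOPATH-relative cert paths with flags that fall back to the
// bundled testdata when left empty. The same pattern in standalone form; the
// flag name and fallback path are illustrative.
package main

import (
    "flag"
    "fmt"
)

var certFile = flag.String("tls_cert_file", "", "The TLS cert file")

func main() {
    flag.Parse()
    if *certFile == "" {
        // Analogous to testdata.Path("server1.pem") in the worker.
        *certFile = "testdata/server1.pem"
    }
    fmt.Println("using TLS cert:", *certFile)
}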
* */ diff --git a/vendor/google.golang.org/grpc/benchmark/worker/util.go b/vendor/google.golang.org/grpc/benchmark/worker/util.go index 6f9b2b03fd5a..f26993e6540b 100644 --- a/vendor/google.golang.org/grpc/benchmark/worker/util.go +++ b/vendor/google.golang.org/grpc/benchmark/worker/util.go @@ -1,57 +1,24 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2016 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ package main -import ( - "log" - "os" - "path/filepath" - "syscall" -) - -// abs returns the absolute path the given relative file or directory path, -// relative to the google.golang.org/grpc directory in the user's GOPATH. -// If rel is already absolute, it is returned unmodified. 
-func abs(rel string) string { - if filepath.IsAbs(rel) { - return rel - } - v, err := goPackagePath("google.golang.org/grpc") - if err != nil { - log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) - } - return filepath.Join(v, rel) -} +import "syscall" func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float64) { var ( @@ -66,25 +33,3 @@ func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float6 return uTimeElapsed, sTimeElapsed } - -func goPackagePath(pkg string) (path string, err error) { - gp := os.Getenv("GOPATH") - if gp == "" { - return path, os.ErrNotExist - } - for _, p := range filepath.SplitList(gp) { - dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) - fi, err := os.Stat(dir) - if os.IsNotExist(err) { - continue - } - if err != nil { - return "", err - } - if !fi.IsDir() { - continue - } - return dir, nil - } - return path, os.ErrNotExist -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index af34a71316fb..1ef2507c35f3 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -40,6 +25,7 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -73,7 +59,10 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran } } for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, dopts.maxMsgSize, inPayload); err != nil { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { break } @@ -86,14 +75,11 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) } c.trailerMD = stream.Trailer() - if peer, ok := peer.FromContext(stream.Context()); ok { - c.peer = peer - } return nil } // sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { defer func() { if err != nil { // If err is connection error, t will be closed, no need to close stream here. @@ -114,11 +100,17 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, Client: true, } } - outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err } - err = t.Write(stream, outBuf, opts) + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) @@ -144,25 +136,33 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - defer cancel() - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() + } + + opts = append(cc.dopts.callOptions, opts...) 
for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return toRPCErr(err) } } defer func() { for _, o := range opts { - o.after(&c) + o.after(c) } }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if EnableTracing { c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) defer c.traceInfo.tr.Finish() @@ -179,27 +179,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ Client: true, EndTime: time.Now(), Error: e, } sh.HandleRPC(ctx, end) - } - }() + }() + } topts := &transport.Options{ Last: true, Delay: false, @@ -209,9 +207,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the + // Record the done handler from Balancer.Get(...). It is called once the // RPC has completed or failed. - put func() + done func(balancer.DoneInfo) ) // TODO(zhaoq): Need a formal spec of fail-fast. callHdr := &transport.CallHdr{ @@ -221,11 +219,11 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + if c.creds != nil { + callHdr.Creds = c.creds } - t, put, err = cc.getTransport(ctx, gopts) + + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -245,28 +243,31 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } stream, err = t.NewStream(ctx, callHdr) if err != nil { - if put != nil { + if done != nil { if _, ok := err.(transport.ConnectionError); ok { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue } return toRPCErr(err) } - err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, stream, t, args, topts) + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } // Retry a non-failfast RPC when // i) there is a connection error; or @@ -276,14 +277,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } return toRPCErr(err) } - err = recvResponse(ctx, cc.dopts, t, &c, stream, reply) + err = recvResponse(ctx, cc.dopts, t, c, stream, reply) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -294,12 +295,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, nil) - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put() + done(balancer.DoneInfo{Err: err}) } return stream.Status().Err() } diff --git a/vendor/google.golang.org/grpc/call_test.go b/vendor/google.golang.org/grpc/call_test.go index 437197c8d86c..f48d30e879b5 100644 --- a/vendor/google.golang.org/grpc/call_test.go +++ b/vendor/google.golang.org/grpc/call_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -47,6 +32,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/grpc/test/leakcheck" "google.golang.org/grpc/transport" ) @@ -119,18 +105,19 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { } } // send a response back to end the stream. - reply, err := encode(testCodec{}, &expectedResponse, nil, nil, nil) + hdr, data, err := encode(testCodec{}, &expectedResponse, nil, nil, nil) if err != nil { t.Errorf("Failed to encode the response: %v", err) return } - h.t.Write(s, reply, &transport.Options{}) + h.t.Write(s, hdr, data, &transport.Options{}) h.t.WriteStatus(s, status.New(codes.OK, "")) } type server struct { lis net.Listener port string + addr string startedErr chan error // sent nil or an error after server starts mu sync.Mutex conns map[transport.ServerTransport]bool @@ -152,7 +139,8 @@ func (s *server) start(t *testing.T, port int, maxStreams uint32) { s.startedErr <- fmt.Errorf("failed to listen: %v", err) return } - _, p, err := net.SplitHostPort(s.lis.Addr().String()) + s.addr = s.lis.Addr().String() + _, p, err := net.SplitHostPort(s.addr) if err != nil { s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) return @@ -226,6 +214,7 @@ func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { } func TestInvoke(t *testing.T) { + defer leakcheck.Check(t) server, cc := setUp(t, 0, math.MaxUint32) var reply string if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse { @@ -236,6 +225,7 @@ func TestInvoke(t *testing.T) { } func TestInvokeLargeErr(t *testing.T) { + defer leakcheck.Check(t) server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "hello" @@ -252,6 +242,7 @@ func TestInvokeLargeErr(t *testing.T) { // TestInvokeErrorSpecialChars checks that error messages don't get mangled. func TestInvokeErrorSpecialChars(t *testing.T) { + defer leakcheck.Check(t) server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "weird error" @@ -268,6 +259,7 @@ func TestInvokeErrorSpecialChars(t *testing.T) { // TestInvokeCancel checks that an Invoke with a canceled context is not sent. func TestInvokeCancel(t *testing.T) { + defer leakcheck.Check(t) server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "canceled" @@ -286,6 +278,7 @@ func TestInvokeCancel(t *testing.T) { // TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC // on a closed client will terminate. 
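// Editorial sketch (not part of the patch): the call tests above now add
// "defer leakcheck.Check(t)" so that goroutines leaked by a test fail it.
// A minimal test using the same pattern (package and test names are
// illustrative; this would live in a _test.go file):
package example

import (
    "testing"

    "google.golang.org/grpc/test/leakcheck"
)

func TestNoLeakedGoroutines(t *testing.T) {
    defer leakcheck.Check(t)
    // Exercise client/server code here; leakcheck fails the test if any
    // unexpected goroutines are still running when the test ends.
}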
func TestInvokeCancelClosedNonFailFast(t *testing.T) { + defer leakcheck.Check(t) server, cc := setUp(t, 0, math.MaxUint32) var reply string cc.Close() diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f542d8bd0417..71de2e50d2bd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -38,14 +23,19 @@ import ( "fmt" "math" "net" + "reflect" + "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -56,8 +46,7 @@ var ( ErrClientConnClosing = errors.New("grpc: the client connection is closing") // ErrClientConnTimeout indicates that the ClientConn cannot establish the // underlying connections within the specified timeout. - // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be - // removed in Q1 2017. + // DEPRECATED: Please use context.DeadlineExceeded instead. 
ErrClientConnTimeout = errors.New("grpc: timed out when dialing") // errNoTransportSecurity indicates that there is no transport security @@ -79,6 +68,8 @@ var ( errConnClosing = errors.New("grpc: the connection is closing") // errConnUnavailable indicates that the connection is unavailable. errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -86,30 +77,71 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions - maxMsgSize int + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is to support v1 balancer. + balancerBuilder balancer.Builder } -const defaultClientMaxMsgSize = math.MaxInt32 +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 +) // DialOption configures how we set up the connection. type DialOption func(*dialOptions) -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { return func(o *dialOptions) { - o.maxMsgSize = s + o.callOptions = append(o.callOptions, cos...) 
} } @@ -136,10 +168,23 @@ func WithDecompressor(dc Decompressor) DialOption { } } -// WithBalancer returns a DialOption which sets a load balancer. +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// Deprecated: use the new balancer APIs in balancer package instead. func WithBalancer(b Balancer) DialOption { return func(o *dialOptions) { - o.balancer = b + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. + return func(o *dialOptions) { + o.balancerBuilder = b } } @@ -204,7 +249,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption } // WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. +// credentials and places auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) @@ -213,6 +258,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn // initially. This is valid if and only if WithBlock() is present. +// Deprecated: use DialContext and context.WithTimeout instead. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { o.timeout = d @@ -241,7 +287,7 @@ func WithStatsHandler(h stats.Handler) DialOption { } } -// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors. +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. // If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network // address and won't try to reconnect. // The default value of FailOnNonTempDialError is false. @@ -259,7 +305,7 @@ func WithUserAgent(s string) DialOption { } } -// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { return func(o *dialOptions) { o.copts.KeepaliveParams = kp @@ -295,26 +341,44 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { } // DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the +// cancel or expire the pending connection. Once this function returns, the // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close // to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. 
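// Editorial sketch (not part of the patch): with the options above, the
// deprecated WithMaxMsgSize dial option becomes a default CallOption on the
// connection. The target address and sizes are illustrative.
package main

import (
    "log"

    "google.golang.org/grpc"
)

func main() {
    conn, err := grpc.Dial("localhost:50051",
        grpc.WithInsecure(),
        // Replaces grpc.WithMaxMsgSize(16<<20) and also caps outgoing messages.
        grpc.WithDefaultCallOptions(
            grpc.MaxCallRecvMsgSize(16<<20),
            grpc.MaxCallSendMsgSize(16<<20),
        ),
    )
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()
}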
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - conns: make(map[Address]*addrConn), + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), } cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.dopts.maxMsgSize = defaultClientMaxMsgSize + for _, opt := range opts { opt(&cc.dopts) } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) }, ) } @@ -343,15 +407,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() + scSet := false if cc.dopts.scChan != nil { - // Wait for the initial service config. + // Try to get an initial service config. select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = sc + scSet = true } - case <-ctx.Done(): - return nil, ctx.Err() + default: } } // Set defaults. @@ -369,89 +434,130 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } else { cc.authority = target } - waitC := make(chan error, 1) - go func() { - defer close(waitC) - if cc.dopts.balancer == nil && cc.sc.LB != nil { - cc.dopts.balancer = cc.sc.LB + + if cc.dopts.balancerBuilder != nil { + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() } - if cc.dopts.balancer != nil { - var credsClone credentials.TransportCredentials - if creds != nil { - credsClone = creds.Clone() - } - config := BalancerConfig{ - DialCreds: credsClone, - } - if err := cc.dopts.balancer.Start(target, config); err != nil { + buildOpts := balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + } + // Build should not take long time. So it's ok to not have a goroutine for it. + // TODO(bar) init balancer after first resolver result to support service config balancer. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, buildOpts) + } else { + waitC := make(chan error, 1) + go func() { + defer close(waitC) + // No balancer, or no resolver within the balancer. Connect directly. + ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) + if err != nil { waitC <- err return } - ch := cc.dopts.balancer.Notify() - if ch != nil { - if cc.dopts.block { - doneChan := make(chan struct{}) - go cc.lbWatcher(doneChan) - <-doneChan - } else { - go cc.lbWatcher(nil) - } + if err := ac.connect(cc.dopts.block); err != nil { + waitC <- err return } + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } } - // No balancer, or no resolver within the balancer. Connect directly. 
- if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err + } + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() } } - if cc.dopts.scChan != nil { go cc.scWatcher() } + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + + if cc.balancerWrapper != nil && cc.resolverWrapper == nil { + // TODO(bar) there should always be a resolver (DNS as the default). + // Unblock balancer initialization with a fake resolver update if there's no resolver. + // The balancer wrapper will not read the addresses, so an empty list works. + // TODO(bar) remove this after the real resolver is started. + cc.balancerWrapper.handleResolvedAddrs([]resolver.Address{}, nil) + } + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + return cc, nil } -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} -const ( - // Idle indicates the ClientConn is idle. - Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) } + return csm.notifyChan } // ClientConn represents a client connection to an RPC server. 
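// Editorial sketch (not part of the patch): connectivityStateManager above
// broadcasts a state change by closing the channel every waiter holds. A
// stripped-down standalone version of that close-to-broadcast pattern:
package main

import (
    "fmt"
    "sync"
)

type notifier struct {
    mu     sync.Mutex
    state  string
    notify chan struct{}
}

func (n *notifier) set(state string) {
    n.mu.Lock()
    defer n.mu.Unlock()
    if n.state == state {
        return
    }
    n.state = state
    if n.notify != nil {
        close(n.notify) // wakes every goroutine blocked on the old channel
        n.notify = nil
    }
}

func (n *notifier) wait() <-chan struct{} {
    n.mu.Lock()
    defer n.mu.Unlock()
    if n.notify == nil {
        n.notify = make(chan struct{})
    }
    return n.notify
}

func main() {
    n := &notifier{state: "CONNECTING"}
    ch := n.wait()
    done := make(chan struct{})
    go func() {
        <-ch
        fmt.Println("state changed to", n.state)
        close(done)
    }()
    n.set("READY")
    <-done
}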
@@ -462,60 +568,42 @@ type ClientConn struct { target string authority string dopts dialOptions + csMgr *connectivityStateManager + + balancerWrapper *ccBalancerWrapper + resolverWrapper *ccResolverWrapper + + blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig - conns map[Address]*addrConn - // Keepalive parameter can be udated if a GoAway is received. + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters } -// lbWatcher watches the Notify channel of the balancer in cc and manages -// connections accordingly. If doneChan is not nil, it is closed after the -// first successfull connection is made. -func (cc *ClientConn) lbWatcher(doneChan chan struct{}) { - for addrs := range cc.dopts.balancer.Notify() { - var ( - add []Address // Addresses need to setup connections. - del []*addrConn // Connections need to tear down. - ) - cc.mu.Lock() - for _, a := range addrs { - if _, ok := cc.conns[a]; !ok { - add = append(add, a) - } - } - for k, c := range cc.conns { - var keep bool - for _, a := range addrs { - if k == a { - keep = true - break - } - } - if !keep { - del = append(del, c) - delete(cc.conns, c.addr) - } - } - cc.mu.Unlock() - for _, a := range add { - if doneChan != nil { - err := cc.resetAddrConn(a, true, nil) - if err == nil { - close(doneChan) - doneChan = nil - } - } else { - cc.resetAddrConn(a, false, nil) - } - } - for _, c := range del { - c.tearDown(errConnDrain) - } +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true } } +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -534,69 +622,64 @@ func (cc *ClientConn) scWatcher() { } } -// resetAddrConn creates an addrConn for addr and adds it to cc.conns. -// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. -// If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ cc: cc, - addr: addr, + addrs: addrs, dopts: cc.dopts, } - cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = cc.mkp - cc.mu.RUnlock() ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.stateCV = sync.NewCond(&ac.mu) - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } // Track ac in cc. This needs to be done before any getTransport(...) is called. 
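// Editorial sketch (not part of the patch): how a caller can use the new
// GetState/WaitForStateChange API above to block until the connection is
// ready, mirroring the loop a blocking DialContext runs internally. The
// address and timeout are illustrative.
package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/connectivity"
)

func main() {
    conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    for {
        s := conn.GetState()
        if s == connectivity.Ready {
            break
        }
        if !conn.WaitForStateChange(ctx, s) {
            log.Fatalf("connection never became ready: %v", ctx.Err())
        }
    }
    log.Println("connection is READY")
}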
cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing + return nil, ErrClientConnClosing } - stale := cc.conns[ac.addr] - cc.conns[ac.addr] = ac + cc.conns[ac] = struct{}{} cc.mu.Unlock() - if stale != nil { - // There is an addrConn alive on ac.addr already. This could be due to - // 1) a buggy Balancer notifies duplicated Addresses; - // 2) goaway was received, a new ac will replace the old ac. - // The old ac should be deleted from cc.conns, but the - // underlying transport should drain rather than close. - if tearDownErr == nil { - // tearDownErr is nil if resetAddrConn is called by - // 1) Dial - // 2) lbWatcher - // In both cases, the stale ac should drain, not close. - stale.tearDown(errConnDrain) - } else { - stale.tearDown(tearDownErr) - } + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +// connect starts to creating transport and also starts the transport monitor +// goroutine for this ac. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. +func (ac *addrConn) connect(block bool) error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.mu.Unlock() + if block { - if err := ac.resetTransport(false); err != nil { + if err := ac.resetTransport(); err != nil { if err != errConnClosing { - // Tear down ac and delete it from cc.conns. - cc.mu.Lock() - delete(cc.conns, ac.addr) - cc.mu.Unlock() ac.tearDown(err) } if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { @@ -609,8 +692,8 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) if err != errConnClosing { // Keep this ac in cc.conns, to get the reason it's torn down. ac.tearDown(err) @@ -623,66 +706,86 @@ func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) return nil } -// TODO: Avoid the locking here. -func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) { +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// It checks whether current connected address of ac is in the new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown { + ac.addrs = addrs + return true + } + + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the serivce, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - m, ok = cc.sc.Methods[method] - return + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m, _ = cc.sc.Methods[method[:i+1]] + } + return m } -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { - var ( - ac *addrConn - ok bool - put func() - ) - if cc.dopts.balancer == nil { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + if cc.balancerWrapper == nil { // If balancer is nil, there should be only one addrConn available. cc.mu.RLock() if cc.conns == nil { cc.mu.RUnlock() + // TODO this function returns toRPCErr and non-toRPCErr. Clean up + // the errors in ClientConn. return nil, nil, toRPCErr(ErrClientConnClosing) } - for _, ac = range cc.conns { + var ac *addrConn + for ac = range cc.conns { // Break after the first iteration to get the first addrConn. - ok = true break } cc.mu.RUnlock() - } else { - var ( - addr Address - err error - ) - addr, put, err = cc.dopts.balancer.Get(ctx, opts) - if err != nil { - return nil, nil, toRPCErr(err) - } - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) + if ac == nil { + return nil, nil, errConnClosing } - ac, ok = cc.conns[addr] - cc.mu.RUnlock() - } - if !ok { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() + t, err := ac.wait(ctx, false /*hasBalancer*/, failfast) + if err != nil { + return nil, nil, err } - return nil, nil, errConnClosing + return t, nil, nil } - t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) + + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) if err != nil { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put() - } - return nil, nil, err + return nil, nil, toRPCErr(err) } - return t, put, nil + return t, done, nil } // Close tears down the ClientConn and all underlying connections. 
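// Editorial sketch (not part of the patch): GetMethodConfig above falls back
// from the exact "/service/method" key to the per-service default key
// "/service/". A standalone illustration of that key derivation, with string
// values standing in for MethodConfig; the service and method names are made up.
package main

import (
    "fmt"
    "strings"
)

func lookup(methods map[string]string, method string) string {
    if m, ok := methods[method]; ok {
        return m
    }
    // Fall back to everything up to and including the last '/'.
    i := strings.LastIndex(method, "/")
    return methods[method[:i+1]]
}

func main() {
    methods := map[string]string{
        "/echo.Echo/":     "service default",
        "/echo.Echo/Ping": "per-method config",
    }
    fmt.Println(lookup(methods, "/echo.Echo/Ping")) // per-method config
    fmt.Println(lookup(methods, "/echo.Echo/Pong")) // service default
}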
@@ -696,11 +799,16 @@ func (cc *ClientConn) Close() error { } conns := cc.conns cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) cc.mu.Unlock() - if cc.dopts.balancer != nil { - cc.dopts.balancer.Close() + cc.blockingpicker.close() + if cc.resolverWrapper != nil { + cc.resolverWrapper.close() + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() } - for _, ac := range conns { + for ac := range conns { ac.tearDown(ErrClientConnClosing) } return nil @@ -711,15 +819,15 @@ type addrConn struct { ctx context.Context cancel context.CancelFunc - cc *ClientConn - addr Address - dopts dialOptions - events trace.EventLog + cc *ClientConn + curAddr resolver.Address + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - down func(error) // the handler called when a connection is down. + mu sync.Mutex + state connectivity.State // ready is closed and becomes nil when a new transport is up or failed // due to timeout. ready chan struct{} @@ -759,125 +867,137 @@ func (ac *addrConn) errorf(format string, a ...interface{}) { } } -// getState returns the connectivity state of the Conn -func (ac *addrConn) getState() ConnectivityState { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -// waitForStateChange blocks until the state changes to something other than the sourceState. -func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { ac.mu.Lock() - defer ac.mu.Unlock() - if sourceState != ac.state { - return ac.state, nil + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - ac.mu.Lock() - err = ctx.Err() - ac.stateCV.Broadcast() - ac.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == ac.state { - ac.stateCV.Wait() - if err != nil { - return ac.state, err - } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - return ac.state, nil -} - -func (ac *addrConn) resetTransport(closeTransport bool) error { + ac.transport = nil + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() for retries := 0; ; retries++ { + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout ac.mu.Lock() - ac.printf("connecting") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. + if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { + timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + } + connectTime := time.Now() + if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } - if ac.down != nil { - ac.down(downErrorf(false, true, "%v", errNetworkIO)) - ac.down = nil + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + // TODO(bar) remove condition once we always have a balancer. 
+ if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } } - ac.state = Connecting - ac.stateCV.Broadcast() - t := ac.transport + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts ac.mu.Unlock() - if closeTransport && t != nil { - t.Close() - } - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - if timeout < sleepTime { - timeout = sleepTime - } - ctx, cancel := context.WithTimeout(ac.ctx, timeout) - connectTime := time.Now() - sinfo := transport.TargetInfo{ - Addr: ac.addr.Addr, - Metadata: ac.addr.Metadata, - } - newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) - // Don't call cancel in success path due to a race in Go 1.6: - // https://github.com/golang/go/issues/15078. - if err != nil { - cancel() - - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return err + for _, addr := range addrsIter { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + sinfo := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + } + newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) + if err != nil { + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + ac.mu.Lock() + if ac.state != connectivity.Shutdown { + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + } + ac.mu.Unlock() + return err + } + grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing + } + ac.mu.Unlock() + continue } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) ac.mu.Lock() - if ac.state == Shutdown { + ac.printf("ready") + if ac.state == connectivity.Shutdown { // ac.tearDown(...) has been invoked. ac.mu.Unlock() + newTransport.Close() return errConnClosing } - ac.errorf("transient failure: %v", err) - ac.state = TransientFailure - ac.stateCV.Broadcast() + ac.state = connectivity.Ready + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + t := ac.transport + ac.transport = newTransport + if t != nil { + t.Close() + } + ac.curAddr = addr if ac.ready != nil { close(ac.ready) ac.ready = nil } ac.mu.Unlock() - closeTransport = false - select { - case <-time.After(sleepTime - time.Since(connectTime)): - case <-ac.ctx.Done(): - return ac.ctx.Err() - } - continue + return nil } ac.mu.Lock() - ac.printf("ready") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
- ac.mu.Unlock() - newTransport.Close() - return errConnClosing + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) } - ac.state = Ready - ac.stateCV.Broadcast() - ac.transport = newTransport if ac.ready != nil { close(ac.ready) ac.ready = nil } - if ac.cc.dopts.balancer != nil { - ac.down = ac.cc.dopts.balancer.Up(ac.addr) - } ac.mu.Unlock() - return nil + timer := time.NewTimer(sleepTime - time.Since(connectTime)) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return ac.ctx.Err() + } + timer.Stop() } } @@ -888,73 +1008,54 @@ func (ac *addrConn) transportMonitor() { ac.mu.Lock() t := ac.transport ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + case <-t.Error(): + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return case <-t.GoAway(): ac.adjustParams(t.GetGoAwayReason()) - // If GoAway happens without any network I/O error, ac is closed without shutting down the - // underlying transport (the transport will be closed when all the pending RPCs finished or - // failed.). - // If GoAway and some network I/O error happen concurrently, ac and its underlying transport - // are closed. - // In both cases, a new ac is created. - select { - case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - default: - ac.cc.resetAddrConn(ac.addr, false, errConnDrain) - } + default: + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() return - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - ac.cc.resetAddrConn(ac.addr, false, errNetworkIO) - return - default: - } + } + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. + ac.state = connectivity.TransientFailure + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { ac.mu.Lock() - if ac.state == Shutdown { - // ac has been shutdown. - ac.mu.Unlock() - return - } - ac.state = TransientFailure - ac.stateCV.Broadcast() + ac.printf("transport exiting: %v", err) ac.mu.Unlock() - if err := ac.resetTransport(true); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) } + return } } } // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in TransientFailure and there is a balancer/failfast is true. +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. 
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { for { ac.mu.Lock() switch { - case ac.state == Shutdown: + case ac.state == connectivity.Shutdown: if failfast || !hasBalancer { // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. err := ac.tearDownErr @@ -963,11 +1064,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } ac.mu.Unlock() return nil, errConnClosing - case ac.state == Ready: + case ac.state == connectivity.Ready: ct := ac.transport ac.mu.Unlock() return ct, nil - case ac.state == TransientFailure: + case ac.state == connectivity.TransientFailure: if failfast || hasBalancer { ac.mu.Unlock() return nil, errConnUnavailable @@ -988,6 +1089,28 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } } +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect(false) + } + return nil, false +} + // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a @@ -995,13 +1118,9 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.cancel() - ac.mu.Lock() + ac.curAddr = resolver.Address{} defer ac.mu.Unlock() - if ac.down != nil { - ac.down(downErrorf(false, false, "%v", err)) - ac.down = nil - } if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or @@ -1009,12 +1128,16 @@ func (ac *addrConn) tearDown(err error) { // address removal and GoAway. ac.transport.GracefulClose() } - if ac.state == Shutdown { + if ac.state == connectivity.Shutdown { return } - ac.state = Shutdown + ac.state = connectivity.Shutdown ac.tearDownErr = err - ac.stateCV.Broadcast() + if ac.cc.balancerWrapper != nil { + ac.cc.balancerWrapper.handleSubConnStateChange(ac.acbw, ac.state) + } else { + ac.cc.csMgr.updateState(ac.state) + } if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1023,8 +1146,11 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } return } + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} diff --git a/vendor/google.golang.org/grpc/clientconn_test.go b/vendor/google.golang.org/grpc/clientconn_test.go index 2db470eaac98..47801e9625f8 100644 --- a/vendor/google.golang.org/grpc/clientconn_test.go +++ b/vendor/google.golang.org/grpc/clientconn_test.go @@ -1,52 +1,87 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ package grpc import ( + "math" "net" "testing" "time" "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/testdata" ) -const tlsDir = "testdata/" +func assertState(wantState connectivity.State, cc *ClientConn) (connectivity.State, bool) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + var state connectivity.State + for state = cc.GetState(); state != wantState && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + return state, state == wantState +} + +func TestConnectivityStates(t *testing.T) { + defer leakcheck.Check(t) + servers, resolver, cleanup := startServers(t, 2, math.MaxUint32) + defer cleanup() + cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(resolver)), WithInsecure()) + if err != nil { + t.Fatalf("Dial(\"foo.bar.com\", WithBalancer(_)) = _, %v, want _ ", err) + } + defer cc.Close() + wantState := connectivity.Ready + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + // Send an update to delete the server connection (tearDown addrConn). 
+ update := []*naming.Update{ + { + Op: naming.Delete, + Addr: "localhost:" + servers[0].port, + }, + } + resolver.w.inject(update) + wantState = connectivity.TransientFailure + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + update[0] = &naming.Update{ + Op: naming.Add, + Addr: "localhost:" + servers[1].port, + } + resolver.w.inject(update) + wantState = connectivity.Ready + if state, ok := assertState(wantState, cc); !ok { + t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState) + } + +} func TestDialTimeout(t *testing.T) { + defer leakcheck.Check(t) conn, err := Dial("Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure()) if err == nil { conn.Close() @@ -57,7 +92,8 @@ func TestDialTimeout(t *testing.T) { } func TestTLSDialTimeout(t *testing.T) { - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + defer leakcheck.Check(t) + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { t.Fatalf("Failed to create credentials %v", err) } @@ -71,6 +107,7 @@ func TestTLSDialTimeout(t *testing.T) { } func TestDefaultAuthority(t *testing.T) { + defer leakcheck.Check(t) target := "Non-Existent.Server:8080" conn, err := Dial(target, WithInsecure()) if err != nil { @@ -83,8 +120,9 @@ func TestDefaultAuthority(t *testing.T) { } func TestTLSServerNameOverwrite(t *testing.T) { + defer leakcheck.Check(t) overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", overwriteServerName) + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName) if err != nil { t.Fatalf("Failed to create credentials %v", err) } @@ -99,6 +137,7 @@ func TestTLSServerNameOverwrite(t *testing.T) { } func TestWithAuthority(t *testing.T) { + defer leakcheck.Check(t) overwriteServerName := "over.write.server.name" conn, err := Dial("Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName)) if err != nil { @@ -111,8 +150,9 @@ func TestWithAuthority(t *testing.T) { } func TestWithAuthorityAndTLS(t *testing.T) { + defer leakcheck.Check(t) overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", overwriteServerName) + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName) if err != nil { t.Fatalf("Failed to create credentials %v", err) } @@ -127,6 +167,7 @@ func TestWithAuthorityAndTLS(t *testing.T) { } func TestDialContextCancel(t *testing.T) { + defer leakcheck.Check(t) ctx, cancel := context.WithCancel(context.Background()) cancel() if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled { @@ -161,6 +202,7 @@ func (b *blockingBalancer) Close() error { } func TestDialWithBlockingBalancer(t *testing.T) { + defer leakcheck.Check(t) ctx, cancel := context.WithCancel(context.Background()) dialDone := make(chan struct{}) go func() { @@ -183,7 +225,8 @@ func (c securePerRPCCredentials) RequireTransportSecurity() bool { } func TestCredentialsMisuse(t *testing.T) { - tlsCreds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + defer leakcheck.Check(t) + tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { t.Fatalf("Failed to create authenticator %v", err) } @@ 
-198,10 +241,12 @@ func TestCredentialsMisuse(t *testing.T) { } func TestWithBackoffConfigDefault(t *testing.T) { + defer leakcheck.Check(t) testBackoffConfigSet(t, &DefaultBackoffConfig) } func TestWithBackoffConfig(t *testing.T) { + defer leakcheck.Check(t) b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2} expected := b setDefaults(&expected) // defaults should be set @@ -209,6 +254,7 @@ func TestWithBackoffConfig(t *testing.T) { } func TestWithBackoffMaxDelay(t *testing.T) { + defer leakcheck.Check(t) md := DefaultBackoffConfig.MaxDelay / 2 expected := BackoffConfig{MaxDelay: md} setDefaults(&expected) @@ -256,7 +302,9 @@ func nonTemporaryErrorDialer(addr string, timeout time.Duration) (net.Conn, erro } func TestDialWithBlockErrorOnNonTemporaryErrorDialer(t *testing.T) { - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer leakcheck.Check(t) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() if _, err := DialContext(ctx, "", WithInsecure(), WithDialer(nonTemporaryErrorDialer), WithBlock(), FailOnNonTempDialError(true)); err != nonTemporaryError { t.Fatalf("Dial(%q) = %v, want %v", "", err, nonTemporaryError) } @@ -294,21 +342,26 @@ func (b *emptyBalancer) Close() error { } func TestNonblockingDialWithEmptyBalancer(t *testing.T) { + defer leakcheck.Check(t) ctx, cancel := context.WithCancel(context.Background()) - dialDone := make(chan struct{}) + defer cancel() + dialDone := make(chan error) go func() { - conn, err := DialContext(ctx, "Non-Existent.Server:80", WithInsecure(), WithBalancer(newEmptyBalancer())) - if err != nil { - t.Fatalf("unexpected error dialing connection: %v", err) - } - conn.Close() - close(dialDone) + dialDone <- func() error { + conn, err := DialContext(ctx, "Non-Existent.Server:80", WithInsecure(), WithBalancer(newEmptyBalancer())) + if err != nil { + return err + } + return conn.Close() + }() }() - <-dialDone - cancel() + if err := <-dialDone; err != nil { + t.Fatalf("unexpected error dialing connection: %s", err) + } } func TestClientUpdatesParamsAfterGoAway(t *testing.T) { + defer leakcheck.Check(t) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. Err: %v", err) diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index bd76ebb7f176..905b048e2ac5 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -1,35 +1,20 @@ /* -* - * Copyright 2014, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2014 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 * -*/ + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ package grpc @@ -96,6 +81,7 @@ func (p protoCodec) Marshal(v interface{}) ([]byte, error) { func (p protoCodec) Unmarshal(data []byte, v interface{}) error { cb := protoBufferPool.Get().(*cachedProtoBuffer) cb.SetBuf(data) + v.(proto.Message).Reset() err := cb.Unmarshal(v.(proto.Message)) cb.SetBuf(nil) protoBufferPool.Put(cb) diff --git a/vendor/google.golang.org/grpc/codec_benchmark_test.go b/vendor/google.golang.org/grpc/codec_benchmark_test.go index 6726a53ed629..dee617ca2553 100644 --- a/vendor/google.golang.org/grpc/codec_benchmark_test.go +++ b/vendor/google.golang.org/grpc/codec_benchmark_test.go @@ -2,34 +2,19 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/codec_test.go b/vendor/google.golang.org/grpc/codec_test.go index 8e9b2150c97a..246b13b08c6b 100644 --- a/vendor/google.golang.org/grpc/codec_test.go +++ b/vendor/google.golang.org/grpc/codec_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index e6762d084558..259837060abf 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -1,4 +1,4 @@ -// generated by stringer -type=Code; DO NOT EDIT +// Code generated by "stringer -type=Code"; DO NOT EDIT. package codes @@ -9,7 +9,7 @@ const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlre var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { + if i >= Code(len(_Code_index)-1) { return fmt.Sprintf("Code(%d)", i) } return _Code_name[_Code_index[i]:_Code_index[i+1]] diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index e14b464acfd4..21e7733a5fe9 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -44,7 +29,7 @@ const ( // OK is returned on success. OK Code = 0 - // Canceled indicates the operation was cancelled (typically by the caller). + // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 // Unknown error. 
An example of where this error may be returned is diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000000..568ef5dc68ba --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" ) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
+ WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index b85f9181dee9..000000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) -show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index a8114d671303..946aa1f2b85c 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -106,10 +91,14 @@ type TransportCredentials interface { // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo @@ -196,14 +185,14 @@ func NewTLS(c *tls.Config) TransportCredentials { return tc } -// NewClientTLSFromCert constructs a TLS from the input certificate for client. +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { @@ -218,12 +207,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } -// NewServerTLSFromCert constructs a TLS from the input certificate for server. +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } -// NewServerTLSFromFile constructs a TLS from the input certificate file and key +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/vendor/google.golang.org/grpc/credentials/credentials_test.go b/vendor/google.golang.org/grpc/credentials/credentials_test.go index 1609374cc311..9b13db51d438 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_test.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -39,6 +24,7 @@ import ( "testing" "golang.org/x/net/context" + "google.golang.org/grpc/testdata" ) func TestTLSOverrideServerName(t *testing.T) { @@ -65,8 +51,6 @@ func TestTLSClone(t *testing.T) { } -const tlsDir = "../test/testdata/" - type serverHandshake func(net.Conn) (AuthInfo, error) func TestClientHandshakeReturnsAuthInfo(t *testing.T) { @@ -144,7 +128,7 @@ func launchServer(t *testing.T, hs serverHandshake, done chan AuthInfo) net.List return lis } -// Is run in a seperate goroutine. +// Is run in a separate goroutine. func serverHandle(t *testing.T, hs serverHandshake, done chan AuthInfo, lis net.Listener) { serverRawConn, err := lis.Accept() if err != nil { @@ -177,7 +161,7 @@ func clientHandle(t *testing.T, hs func(net.Conn, string) (AuthInfo, error), lis // Server handshake implementation in gRPC. 
func gRPCServerHandshake(conn net.Conn) (AuthInfo, error) { - serverTLS, err := NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") + serverTLS, err := NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { return nil, err } @@ -199,7 +183,7 @@ func gRPCClientHandshake(conn net.Conn, lisAddr string) (AuthInfo, error) { } func tlsServerHandshake(conn net.Conn) (AuthInfo, error) { - cert, err := tls.LoadX509KeyPair(tlsDir+"server1.pem", tlsDir+"server1.key") + cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go index 7597b09e358b..60409aac0fbc 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -3,34 +3,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go index 0ecf342da841..93f0e1d8de23 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -2,34 +2,19 @@ /* * - * Copyright 2017, Google Inc. 
- * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go index cfd40dfa34ab..d6bbcc9fdd95 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -2,34 +2,19 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 25393cc641c5..f6d597a14fe6 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -1,33 +1,18 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -37,6 +22,7 @@ package oauth import ( "fmt" "io/ioutil" + "sync" "golang.org/x/net/context" "golang.org/x/oauth2" @@ -94,7 +80,7 @@ func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[s return nil, err } return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, + "authorization": token.Type() + " " + token.AccessToken, }, nil } @@ -114,7 +100,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { return map[string]string{ - "authorization": oa.token.TokenType + " " + oa.token.AccessToken, + "authorization": oa.token.Type() + " " + oa.token.AccessToken, }, nil } @@ -132,20 +118,27 @@ func NewComputeEngine() credentials.PerRPCCredentials { // serviceAccount represents PerRPCCredentials via JWT signing key. type serviceAccount struct { + mu sync.Mutex config *jwt.Config -} - -func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - token, err := s.config.TokenSource(ctx).Token() - if err != nil { - return nil, err + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } } return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, + "authorization": s.t.Type() + " " + s.t.AccessToken, }, nil } -func (s serviceAccount) RequireTransportSecurity() bool { +func (s *serviceAccount) RequireTransportSecurity() bool { return true } @@ -156,7 +149,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR if err != nil { return nil, err } - return serviceAccount{config: config}, nil + return &serviceAccount{config: config}, nil } // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index a35f218852bd..187adbb117f2 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + /* Package grpc implements an RPC system called gRPC. -See www.grpc.io for more information about gRPC. +See grpc.io for more information about gRPC. 
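The credentials/oauth hunk above changes serviceAccount to hold a sync.Mutex and a cached *oauth2.Token, asking the TokenSource for a fresh token only when the cached one is no longer valid. The following is a minimal sketch of that caching pattern under stated assumptions: the `cachedTokenSource` type and `authorizationHeader` helper are made-up names for illustration, and a static token source stands in for a real JWT config; it is not code from this patch or from the gRPC library.

```go
// Illustration of lazy token refresh behind a mutex, assuming a stand-in
// cachedTokenSource type (not part of gRPC or this patch).
package main

import (
	"fmt"
	"sync"

	"golang.org/x/oauth2"
)

type cachedTokenSource struct {
	mu  sync.Mutex
	src oauth2.TokenSource
	t   *oauth2.Token
}

func (c *cachedTokenSource) authorizationHeader() (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.t.Valid() { // nil or expired cached token triggers a refresh
		var err error
		c.t, err = c.src.Token()
		if err != nil {
			return "", err
		}
	}
	return c.t.Type() + " " + c.t.AccessToken, nil
}

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})
	c := &cachedTokenSource{src: src}
	hdr, err := c.authorizationHeader()
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr) // "Bearer example-token"
}
```

Assuming token acquisition is comparatively expensive (for example a JWT exchange), serializing refreshes behind the mutex and reusing the token until it expires avoids fetching a new credential on every RPC.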
*/ package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/examples/README.md b/vendor/google.golang.org/grpc/examples/README.md index 6ea6b35f62aa..6d9175c68837 100644 --- a/vendor/google.golang.org/grpc/examples/README.md +++ b/vendor/google.golang.org/grpc/examples/README.md @@ -8,7 +8,7 @@ For this sample, we've already generated the server and client stubs from [hello PREREQUISITES ------------- -- This requires Go 1.5 or later +- This requires Go 1.7 or later - Requires that [GOPATH is set](https://golang.org/doc/code.html#GOPATH) ``` @@ -51,7 +51,10 @@ OPTIONAL - Rebuilding the generated code ``` $ go get -a github.com/golang/protobuf/protoc-gen-go -$ -$ # from this dir; invoke protoc -$ protoc -I ./helloworld/helloworld/ ./helloworld/helloworld/helloworld.proto --go_out=plugins=grpc:helloworld +``` + +3 Rebuild the generated Go code. + +``` +$ go generate google.golang.org/grpc/examples/helloworld/... ``` diff --git a/vendor/google.golang.org/grpc/examples/gotutorial.md b/vendor/google.golang.org/grpc/examples/gotutorial.md index a86b5b6c862a..a520a5a155a7 100644 --- a/vendor/google.golang.org/grpc/examples/gotutorial.md +++ b/vendor/google.golang.org/grpc/examples/gotutorial.md @@ -2,11 +2,11 @@ This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to: -- Define a service in a .proto file. +- Define a service in a `.proto` file. - Generate server and client code using the protocol buffer compiler. - Use the Go gRPC API to write a simple client and server for your service. -It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers] (https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, which is currently in alpha release:you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. +It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. @@ -14,7 +14,7 @@ This isn't a comprehensive guide to using gRPC in Go: more reference documentati Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. -With gRPC we can define our service once in a .proto file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. 
We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. +With gRPC we can define our service once in a `.proto` file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. ## Example code and setup @@ -33,9 +33,9 @@ You also should have the relevant tools installed to generate the server and cli ## Defining the service -Our first step (as you'll know from the [quick start](http://www.grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete .proto file in [examples/route_guide/routeguide/route_guide.proto](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/routeguide/route_guide.proto). +Our first step (as you'll know from the [quick start](https://grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete `.proto` file in [examples/route_guide/routeguide/route_guide.proto](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/routeguide/route_guide.proto). -To define a service, you specify a named `service` in your .proto file: +To define a service, you specify a named `service` in your `.proto` file: ```proto service RouteGuide { @@ -74,7 +74,7 @@ Then you define `rpc` methods inside your service definition, specifying their r rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} ``` -Our .proto file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: +Our `.proto` file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: ```proto // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). @@ -89,7 +89,7 @@ message Point { ## Generating client and server code -Next we need to generate the gRPC client and server interfaces from our .proto service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. +Next we need to generate the gRPC client and server interfaces from our `.proto` service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. 
For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): @@ -263,7 +263,7 @@ Once we've implemented all our methods, we also need to start up a gRPC server s ```go flag.Parse() -lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } @@ -274,7 +274,7 @@ grpcServer.Serve(lis) ``` To build and start a server, we: -1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))`. +1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))`. 2. Create an instance of the gRPC server using `grpc.NewServer()`. 3. Register our service implementation with the gRPC server. 4. Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called. @@ -298,7 +298,7 @@ defer conn.Close() You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service. -Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our .proto. +Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our `.proto` file. ```go client := pb.NewRouteGuideClient(conn) diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go index a451c99dc926..3dc426e24b81 100644 --- a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go @@ -1,33 +1,18 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go index 162cf908010d..702a3b6170f5 100644 --- a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go +++ b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go @@ -1,36 +1,23 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc -I ../helloworld --go_out=plugins=grpc:../helloworld ../helloworld/helloworld.proto + package main import ( diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go index c8c8942a12fb..64bd1ef3c6fb 100644 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: helloworld.proto -// DO NOT EDIT! /* Package helloworld is a generated protocol buffer package. @@ -44,6 +43,13 @@ func (m *HelloRequest) String() string { return proto.CompactTextStri func (*HelloRequest) ProtoMessage() {} func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *HelloRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // The response message containing the greetings type HelloReply struct { Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` @@ -54,6 +60,13 @@ func (m *HelloReply) String() string { return proto.CompactTextString func (*HelloReply) ProtoMessage() {} func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *HelloReply) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + func init() { proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") @@ -136,16 +149,16 @@ var _Greeter_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 174 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, - 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7, - 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb, - 0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b, - 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, + 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, + 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 
0xe4, 0xa4, 0x16, 0x23, 0xa9, + 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, + 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto index c3ddd4aee77f..d79a6a0d1f57 100644 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto +++ b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto @@ -1,31 +1,16 @@ -// Copyright 2015, Google Inc. -// All rights reserved. +// Copyright 2015 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
syntax = "proto3"; diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go similarity index 100% rename from vendor/google.golang.org/grpc/examples/helloworld/mock/mock_helloworld/hw_mock.go rename to vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go similarity index 62% rename from vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go rename to vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go index b63fec7ad63f..5ad9b8b0ddb4 100644 --- a/vendor/google.golang.org/grpc/examples/helloworld/mock/hw_test.go +++ b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock_test.go @@ -1,4 +1,22 @@ -package mock_test +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mock_helloworld_test import ( "fmt" @@ -8,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/context" helloworld "google.golang.org/grpc/examples/helloworld/helloworld" - hwmock "google.golang.org/grpc/examples/helloworld/mock/mock_helloworld" + hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" ) // rpcMsg implements the gomock.Matcher interface diff --git a/vendor/google.golang.org/grpc/examples/route_guide/README.md b/vendor/google.golang.org/grpc/examples/route_guide/README.md index a7c0c2b0e539..7f009d5caf92 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/README.md +++ b/vendor/google.golang.org/grpc/examples/route_guide/README.md @@ -2,9 +2,9 @@ The route guide server and client demonstrate how to use grpc go libraries to perform unary, client streaming, server streaming and full duplex RPCs. -Please refer to [gRPC Basics: Go] (http://www.grpc.io/docs/tutorials/basic/go.html) for more information. +Please refer to [gRPC Basics: Go] (https://grpc.io/docs/tutorials/basic/go.html) for more information. -See the definition of the route guide service in proto/route_guide.proto. +See the definition of the route guide service in routeguide/route_guide.proto. # Run the sample code To compile and run the server, assuming you are in the root of the route_guide diff --git a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go index fff6398d4d99..6fc7a079c51f 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go +++ b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go @@ -1,45 +1,31 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ // Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // -// It interacts with the route guide service whose definition can be found in proto/route_guide.proto. +// It interacts with the route guide service whose definition can be found in routeguide/route_guide.proto. package main import ( "flag" "io" + "log" "math/rand" "time" @@ -47,32 +33,32 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" pb "google.golang.org/grpc/examples/route_guide/routeguide" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") - caFile = flag.String("ca_file", "testdata/ca.pem", "The file containning the CA root cert file") + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") ) // printFeature gets the feature for the given point. 
func printFeature(client pb.RouteGuideClient, point *pb.Point) { - grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) + log.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) feature, err := client.GetFeature(context.Background(), point) if err != nil { - grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } - grpclog.Println(feature) + log.Println(feature) } // printFeatures lists all the features within the given bounding Rectangle. func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { - grpclog.Printf("Looking for features within %v", rect) + log.Printf("Looking for features within %v", rect) stream, err := client.ListFeatures(context.Background(), rect) if err != nil { - grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } for { feature, err := stream.Recv() @@ -80,9 +66,9 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { break } if err != nil { - grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } - grpclog.Println(feature) + log.Println(feature) } } @@ -95,21 +81,21 @@ func runRecordRoute(client pb.RouteGuideClient) { for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } - grpclog.Printf("Traversing %d points.", len(points)) + log.Printf("Traversing %d points.", len(points)) stream, err := client.RecordRoute(context.Background()) if err != nil { - grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { - grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) + log.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { - grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } - grpclog.Printf("Route summary: %v", reply) + log.Printf("Route summary: %v", reply) } // runRouteChat receives a sequence of route notes, while sending notes for various locations. 
@@ -124,7 +110,7 @@ func runRouteChat(client pb.RouteGuideClient) { } stream, err := client.RouteChat(context.Background()) if err != nil { - grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) + log.Fatalf("%v.RouteChat(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { @@ -136,14 +122,14 @@ func runRouteChat(client pb.RouteGuideClient) { return } if err != nil { - grpclog.Fatalf("Failed to receive a note : %v", err) + log.Fatalf("Failed to receive a note : %v", err) } - grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) + log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { - grpclog.Fatalf("Failed to send a note: %v", err) + log.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() @@ -160,19 +146,12 @@ func main() { flag.Parse() var opts []grpc.DialOption if *tls { - var sn string - if *serverHostOverride != "" { - sn = *serverHostOverride + if *caFile == "" { + *caFile = testdata.Path("ca.pem") } - var creds credentials.TransportCredentials - if *caFile != "" { - var err error - creds, err = credentials.NewClientTLSFromFile(*caFile, sn) - if err != nil { - grpclog.Fatalf("Failed to create TLS credentials %v", err) - } - } else { - creds = credentials.NewClientTLSFromCert(nil, sn) + creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) + if err != nil { + log.Fatalf("Failed to create TLS credentials %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { @@ -180,7 +159,7 @@ func main() { } conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { - grpclog.Fatalf("fail to dial: %v", err) + log.Fatalf("fail to dial: %v", err) } defer conn.Close() client := pb.NewRouteGuideClient(conn) diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go new file mode 100644 index 000000000000..328c929fa8e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go @@ -0,0 +1,200 @@ +// Automatically generated by MockGen. DO NOT EDIT! +// Source: google.golang.org/grpc/examples/route_guide/routeguide (interfaces: RouteGuideClient,RouteGuide_RouteChatClient) + +package mock_routeguide + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + routeguide "google.golang.org/grpc/examples/route_guide/routeguide" + metadata "google.golang.org/grpc/metadata" +) + +// Mock of RouteGuideClient interface +type MockRouteGuideClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuideClientRecorder +} + +// Recorder for MockRouteGuideClient (not exported) +type _MockRouteGuideClientRecorder struct { + mock *MockRouteGuideClient +} + +func NewMockRouteGuideClient(ctrl *gomock.Controller) *MockRouteGuideClient { + mock := &MockRouteGuideClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuideClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuideClient) EXPECT() *_MockRouteGuideClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuideClient) GetFeature(_param0 context.Context, _param1 *routeguide.Point, _param2 ...grpc.CallOption) (*routeguide.Feature, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "GetFeature", _s...) 
+ ret0, _ := ret[0].(*routeguide.Feature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) GetFeature(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "GetFeature", _s...) +} + +func (_m *MockRouteGuideClient) ListFeatures(_param0 context.Context, _param1 *routeguide.Rectangle, _param2 ...grpc.CallOption) (routeguide.RouteGuide_ListFeaturesClient, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "ListFeatures", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_ListFeaturesClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) ListFeatures(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "ListFeatures", _s...) +} + +func (_m *MockRouteGuideClient) RecordRoute(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RecordRouteClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RecordRoute", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_RecordRouteClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RecordRoute(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecordRoute", _s...) +} + +func (_m *MockRouteGuideClient) RouteChat(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RouteChatClient, error) { + _s := []interface{}{_param0} + for _, _x := range _param1 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "RouteChat", _s...) + ret0, _ := ret[0].(routeguide.RouteGuide_RouteChatClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuideClientRecorder) RouteChat(arg0 interface{}, arg1 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0}, arg1...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "RouteChat", _s...) 
+} + +// Mock of RouteGuide_RouteChatClient interface +type MockRouteGuide_RouteChatClient struct { + ctrl *gomock.Controller + recorder *_MockRouteGuide_RouteChatClientRecorder +} + +// Recorder for MockRouteGuide_RouteChatClient (not exported) +type _MockRouteGuide_RouteChatClientRecorder struct { + mock *MockRouteGuide_RouteChatClient +} + +func NewMockRouteGuide_RouteChatClient(ctrl *gomock.Controller) *MockRouteGuide_RouteChatClient { + mock := &MockRouteGuide_RouteChatClient{ctrl: ctrl} + mock.recorder = &_MockRouteGuide_RouteChatClientRecorder{mock} + return mock +} + +func (_m *MockRouteGuide_RouteChatClient) EXPECT() *_MockRouteGuide_RouteChatClientRecorder { + return _m.recorder +} + +func (_m *MockRouteGuide_RouteChatClient) CloseSend() error { + ret := _m.ctrl.Call(_m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) CloseSend() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CloseSend") +} + +func (_m *MockRouteGuide_RouteChatClient) Context() context.Context { + ret := _m.ctrl.Call(_m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Context() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Context") +} + +func (_m *MockRouteGuide_RouteChatClient) Header() (metadata.MD, error) { + ret := _m.ctrl.Call(_m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Header() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Header") +} + +func (_m *MockRouteGuide_RouteChatClient) Recv() (*routeguide.RouteNote, error) { + ret := _m.ctrl.Call(_m, "Recv") + ret0, _ := ret[0].(*routeguide.RouteNote) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Recv() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Recv") +} + +func (_m *MockRouteGuide_RouteChatClient) RecvMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "RecvMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) RecvMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "RecvMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Send(_param0 *routeguide.RouteNote) error { + ret := _m.ctrl.Call(_m, "Send", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Send(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Send", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) SendMsg(_param0 interface{}) error { + ret := _m.ctrl.Call(_m, "SendMsg", _param0) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) SendMsg(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SendMsg", arg0) +} + +func (_m *MockRouteGuide_RouteChatClient) Trailer() metadata.MD { + ret := _m.ctrl.Call(_m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +func (_mr *_MockRouteGuide_RouteChatClientRecorder) Trailer() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Trailer") +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go new file mode 100644 index 000000000000..8c6d63c72e7c --- /dev/null +++ 
b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock_test.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mock_routeguide_test + +import ( + "fmt" + "testing" + + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" + rgpb "google.golang.org/grpc/examples/route_guide/routeguide" +) + +var ( + msg = &rgpb.RouteNote{ + Location: &rgpb.Point{Latitude: 17, Longitude: 29}, + Message: "Taxi-cab", + } +) + +func TestRouteChat(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Create mock for the stream returned by RouteChat + stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) + // set expectation on sending. + stream.EXPECT().Send( + gomock.Any(), + ).Return(nil) + // Set expectation on receiving. + stream.EXPECT().Recv().Return(msg, nil) + stream.EXPECT().CloseSend().Return(nil) + // Create mock for the client interface. + rgclient := rgmock.NewMockRouteGuideClient(ctrl) + // Set expectation on RouteChat + rgclient.EXPECT().RouteChat( + gomock.Any(), + ).Return(stream, nil) + if err := testRouteChat(rgclient); err != nil { + t.Fatalf("Test failed: %v", err) + } +} + +func testRouteChat(client rgpb.RouteGuideClient) error { + stream, err := client.RouteChat(context.Background()) + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + if err := stream.CloseSend(); err != nil { + return err + } + got, err := stream.Recv() + if err != nil { + return err + } + if !proto.Equal(got, msg) { + return fmt.Errorf("stream.Recv() = %v, want %v", got, msg) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go index cbcf2f30ade4..cf7f3937e118 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: route_guide.proto -// DO NOT EDIT! /* Package routeguide is a generated protocol buffer package. @@ -51,6 +50,20 @@ func (m *Point) String() string { return proto.CompactTextString(m) } func (*Point) ProtoMessage() {} func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Point) GetLatitude() int32 { + if m != nil { + return m.Latitude + } + return 0 +} + +func (m *Point) GetLongitude() int32 { + if m != nil { + return m.Longitude + } + return 0 +} + // A latitude-longitude rectangle, represented as two diagonally opposite // points "lo" and "hi". 
type Rectangle struct { @@ -94,6 +107,13 @@ func (m *Feature) String() string { return proto.CompactTextString(m) func (*Feature) ProtoMessage() {} func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Feature) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *Feature) GetLocation() *Point { if m != nil { return m.Location @@ -121,6 +141,13 @@ func (m *RouteNote) GetLocation() *Point { return nil } +func (m *RouteNote) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + // A RouteSummary is received in response to a RecordRoute rpc. // // It contains the number of individual points received, the number of @@ -142,6 +169,34 @@ func (m *RouteSummary) String() string { return proto.CompactTextStri func (*RouteSummary) ProtoMessage() {} func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *RouteSummary) GetPointCount() int32 { + if m != nil { + return m.PointCount + } + return 0 +} + +func (m *RouteSummary) GetFeatureCount() int32 { + if m != nil { + return m.FeatureCount + } + return 0 +} + +func (m *RouteSummary) GetDistance() int32 { + if m != nil { + return m.Distance + } + return 0 +} + +func (m *RouteSummary) GetElapsedTime() int32 { + if m != nil { + return m.ElapsedTime + } + return 0 +} + func init() { proto.RegisterType((*Point)(nil), "routeguide.Point") proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") @@ -459,30 +514,30 @@ func init() { proto.RegisterFile("route_guide.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 404 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0xed, 0xf4, 0xb5, 0xaf, 0x2f, 0x37, 0x79, 0x3c, 0x3a, 0x0f, 0x21, 0x54, 0x41, 0x8d, 0x9b, - 0x6e, 0x0c, 0xa5, 0x82, 0x4b, 0xc5, 0x16, 0xec, 0xa6, 0x48, 0x8d, 0xdd, 0x97, 0x31, 0x19, 0xd3, - 0x81, 0x24, 0x13, 0x92, 0x09, 0xe8, 0x07, 0xf8, 0x05, 0xfe, 0xb0, 0x93, 0xc9, 0xa4, 0x4d, 0xb5, - 0xc5, 0x5d, 0xe6, 0xdc, 0x73, 0xee, 0x3d, 0xf7, 0x5c, 0x02, 0xfd, 0x8c, 0x17, 0x82, 0xae, 0xc2, - 0x82, 0x05, 0xd4, 0x4d, 0x33, 0x2e, 0x38, 0x06, 0x05, 0x29, 0xc4, 0xb9, 0x83, 0xee, 0x82, 0xb3, - 0x44, 0xe0, 0x01, 0xfc, 0x89, 0x88, 0x60, 0xa2, 0x08, 0xa8, 0x8d, 0xce, 0xd0, 0xb0, 0xeb, 0x6d, - 0xde, 0xf8, 0x04, 0x8c, 0x88, 0x27, 0x61, 0x55, 0x6c, 0xab, 0xe2, 0x16, 0x70, 0x1e, 0xc1, 0xf0, - 0xa8, 0x2f, 0x48, 0x12, 0x46, 0x14, 0x9f, 0x43, 0x3b, 0xe2, 0xaa, 0x81, 0x39, 0xee, 0xbb, 0xdb, - 0x41, 0xae, 0x9a, 0xe2, 0xc9, 0x62, 0x49, 0x59, 0x33, 0xd5, 0x66, 0x3f, 0x65, 0xcd, 0x9c, 0x39, - 0xf4, 0xee, 0x29, 0x11, 0x45, 0x46, 0x31, 0x86, 0x4e, 0x42, 0xe2, 0xca, 0x93, 0xe1, 0xa9, 0x6f, - 0x7c, 0x29, 0xbd, 0x72, 0x5f, 0xba, 0xe3, 0xc9, 0xe1, 0x3e, 0x1b, 0x8a, 0xb3, 0x94, 0x06, 0xcb, - 0xea, 0x03, 0x17, 0xbb, 0x5a, 0xf4, 0xa3, 0x16, 0xdb, 0xd0, 0x8b, 0x69, 0x9e, 0x93, 0xb0, 0x5a, - 0xdc, 0xf0, 0xea, 0xa7, 0xf3, 0x81, 0xc0, 0x52, 0x6d, 0x9f, 0x8a, 0x38, 0x26, 0xd9, 0x1b, 0x3e, - 0x05, 0x33, 0x2d, 0xd5, 0x2b, 0x9f, 0x17, 0x89, 0xd0, 0x21, 0x82, 0x82, 0xa6, 0x25, 0x82, 0x2f, - 0xe0, 0xef, 0x4b, 0xb5, 0x95, 0xa6, 0x54, 0x51, 0x5a, 0x1a, 0xac, 0x48, 0xf2, 0x0e, 0x01, 0xcb, - 0x65, 0x9a, 0x3e, 0xb5, 0x7f, 0x55, 0x77, 0xa8, 0xdf, 0x32, 0x39, 0x8b, 0x46, 0x24, 0xcd, 0x69, - 0xb0, 0x12, 0x4c, 0x66, 0xd2, 0x51, 0x75, 0x53, 0x63, 0x4b, 0x09, 0x8d, 0xdf, 0xdb, 0x00, 0xca, - 0xd5, 0xac, 0x5c, 0x07, 0x5f, 0x03, 0xcc, 0xa8, 0xa8, 0xb3, 0xfc, 0xbe, 0xe9, 0xe0, 0x7f, 0x13, - 0xd2, 0x3c, 0xa7, 0x85, 0x6f, 0xc0, 
0x9a, 0xcb, 0xa9, 0x1a, 0xc8, 0xf1, 0x51, 0x93, 0xb6, 0xb9, - 0xf6, 0x01, 0xf5, 0x08, 0x49, 0xbd, 0x29, 0x59, 0x3c, 0x0b, 0x94, 0x97, 0x7d, 0x83, 0xed, 0x9d, - 0x8e, 0x8d, 0x1c, 0x9d, 0xd6, 0x10, 0xe1, 0x5b, 0x7d, 0xb2, 0xe9, 0x9a, 0x88, 0x2f, 0xc3, 0xeb, - 0x4b, 0x0e, 0xf6, 0xc3, 0xa5, 0x7c, 0x84, 0x26, 0x23, 0x38, 0x66, 0xdc, 0x0d, 0xb3, 0xd4, 0x77, - 0xe9, 0x2b, 0x89, 0xd3, 0x88, 0xe6, 0x0d, 0xfa, 0xe4, 0xdf, 0x36, 0xa3, 0x45, 0xf9, 0x4f, 0x2c, - 0xd0, 0xf3, 0x6f, 0xf5, 0x73, 0x5c, 0x7d, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdd, 0xca, 0xd3, 0x40, + 0x10, 0xfd, 0x36, 0x7e, 0x9f, 0x6d, 0x26, 0x11, 0xe9, 0x88, 0x10, 0xa2, 0xa0, 0x8d, 0x37, 0xbd, + 0x31, 0x94, 0x0a, 0x5e, 0x56, 0x6c, 0xc1, 0xde, 0x14, 0xa9, 0xb1, 0xf7, 0x65, 0x4d, 0xc6, 0x74, + 0x61, 0x93, 0x0d, 0xc9, 0x06, 0xf4, 0x01, 0x7c, 0x02, 0x5f, 0x58, 0xb2, 0x49, 0xda, 0x54, 0x5b, + 0xbc, 0xdb, 0x39, 0x73, 0xce, 0xfc, 0x9c, 0x61, 0x61, 0x52, 0xaa, 0x5a, 0xd3, 0x21, 0xad, 0x45, + 0x42, 0x61, 0x51, 0x2a, 0xad, 0x10, 0x0c, 0x64, 0x90, 0xe0, 0x23, 0x3c, 0xec, 0x94, 0xc8, 0x35, + 0xfa, 0x30, 0x96, 0x5c, 0x0b, 0x5d, 0x27, 0xe4, 0xb1, 0xd7, 0x6c, 0xf6, 0x10, 0x9d, 0x62, 0x7c, + 0x09, 0xb6, 0x54, 0x79, 0xda, 0x26, 0x2d, 0x93, 0x3c, 0x03, 0xc1, 0x17, 0xb0, 0x23, 0x8a, 0x35, + 0xcf, 0x53, 0x49, 0x38, 0x05, 0x4b, 0x2a, 0x53, 0xc0, 0x59, 0x4c, 0xc2, 0x73, 0xa3, 0xd0, 0x74, + 0x89, 0x2c, 0xa9, 0x1a, 0xca, 0x51, 0x98, 0x32, 0xd7, 0x29, 0x47, 0x11, 0x6c, 0x61, 0xf4, 0x89, + 0xb8, 0xae, 0x4b, 0x42, 0x84, 0xfb, 0x9c, 0x67, 0xed, 0x4c, 0x76, 0x64, 0xde, 0xf8, 0x16, 0xc6, + 0x52, 0xc5, 0x5c, 0x0b, 0x95, 0xdf, 0xae, 0x73, 0xa2, 0x04, 0x7b, 0xb0, 0xa3, 0x26, 0xfb, 0x59, + 0xe9, 0x4b, 0x2d, 0xfb, 0xaf, 0x16, 0x3d, 0x18, 0x65, 0x54, 0x55, 0x3c, 0x6d, 0x17, 0xb7, 0xa3, + 0x3e, 0x0c, 0x7e, 0x33, 0x70, 0x4d, 0xd9, 0xaf, 0x75, 0x96, 0xf1, 0xf2, 0x27, 0xbe, 0x02, 0xa7, + 0x68, 0xd4, 0x87, 0x58, 0xd5, 0xb9, 0xee, 0x4c, 0x04, 0x03, 0xad, 0x1b, 0x04, 0xdf, 0xc0, 0x93, + 0xef, 0xed, 0x56, 0x1d, 0xa5, 0xb5, 0xd2, 0xed, 0xc0, 0x96, 0xe4, 0xc3, 0x38, 0x11, 0x95, 0xe6, + 0x79, 0x4c, 0xde, 0xa3, 0xf6, 0x0e, 0x7d, 0x8c, 0x53, 0x70, 0x49, 0xf2, 0xa2, 0xa2, 0xe4, 0xa0, + 0x45, 0x46, 0xde, 0xbd, 0xc9, 0x3b, 0x1d, 0xb6, 0x17, 0x19, 0x2d, 0x7e, 0x59, 0x00, 0x66, 0xaa, + 0x4d, 0xb3, 0x0e, 0xbe, 0x07, 0xd8, 0x90, 0xee, 0xbd, 0xfc, 0x77, 0x53, 0xff, 0xd9, 0x10, 0xea, + 0x78, 0xc1, 0x1d, 0x2e, 0xc1, 0xdd, 0x8a, 0xaa, 0x17, 0x56, 0xf8, 0x7c, 0x48, 0x3b, 0x5d, 0xfb, + 0x86, 0x7a, 0xce, 0x70, 0x09, 0x4e, 0x44, 0xb1, 0x2a, 0x13, 0x33, 0xcb, 0xb5, 0xc6, 0xde, 0x45, + 0xc5, 0x81, 0x8f, 0xc1, 0xdd, 0x8c, 0xe1, 0x87, 0xee, 0x64, 0xeb, 0x23, 0xd7, 0x7f, 0x35, 0xef, + 0x2f, 0xe9, 0x5f, 0x87, 0x1b, 0xf9, 0x9c, 0xad, 0xe6, 0xf0, 0x42, 0xa8, 0x30, 0x2d, 0x8b, 0x38, + 0xa4, 0x1f, 0x3c, 0x2b, 0x24, 0x55, 0x03, 0xfa, 0xea, 0xe9, 0xd9, 0xa3, 0x5d, 0xf3, 0x27, 0x76, + 0xec, 0xdb, 0x63, 0xf3, 0x39, 0xde, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, 0x31, 0x03, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto index 5a782aa273bb..fe21e437adbc 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto +++ b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto @@ -1,31 +1,16 @@ -// Copyright 2015, Google Inc. -// All rights reserved. +// Copyright 2015 gRPC authors. 
// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; diff --git a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go index 5932722bfd79..5d919047ecdb 100644 --- a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go +++ b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go @@ -1,40 +1,27 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc -I ../routeguide --go_out=plugins=grpc:../routeguide ../routeguide/route_guide.proto + // Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // -// It implements the route guide service whose definition can be found in proto/route_guide.proto. +// It implements the route guide service whose definition can be found in routeguide/route_guide.proto. package main import ( @@ -43,6 +30,7 @@ import ( "fmt" "io" "io/ioutil" + "log" "math" "net" "time" @@ -51,7 +39,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/testdata" "github.com/golang/protobuf/proto" @@ -60,8 +48,8 @@ import ( var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") - certFile = flag.String("cert_file", "testdata/server1.pem", "The TLS cert file") - keyFile = flag.String("key_file", "testdata/server1.key", "The TLS key file") + certFile = flag.String("cert_file", "", "The TLS cert file") + keyFile = flag.String("key_file", "", "The TLS key file") jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features") port = flag.Int("port", 10000, "The server port") ) @@ -159,10 +147,10 @@ func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error func (s *routeGuideServer) loadFeatures(filePath string) { file, err := ioutil.ReadFile(filePath) if err != nil { - grpclog.Fatalf("Failed to load default features: %v", err) + log.Fatalf("Failed to load default features: %v", err) } if err := json.Unmarshal(file, &s.savedFeatures); err != nil { - grpclog.Fatalf("Failed to load default features: %v", err) + log.Fatalf("Failed to load default features: %v", err) } } @@ -221,15 +209,21 @@ func newServer() *routeGuideServer { func main() { flag.Parse() - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) if err != nil { - grpclog.Fatalf("failed to listen: %v", err) + log.Fatalf("failed to listen: %v", err) } var opts []grpc.ServerOption if *tls { + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } creds, 
err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - grpclog.Fatalf("Failed to generate credentials %v", err) + log.Fatalf("Failed to generate credentials %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem b/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem b/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg 
-zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go deleted file mode 100644 index b61c57e88de8..000000000000 --- a/vendor/google.golang.org/grpc/go16.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "fmt" - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req.Cancel = ctx.Done() - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go deleted file mode 100644 index 844f0e1899b0..000000000000 --- a/vendor/google.golang.org/grpc/go17.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "net" - "net/http" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return err - } - return nil -} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go index 524e429df3ef..db56ff362172 100644 --- a/vendor/google.golang.org/grpc/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -43,7 +28,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/naming" @@ -74,41 +59,21 @@ type balanceLoadClientStream struct { ClientStream } -func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { +func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } -func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { - m := new(lbpb.LoadBalanceResponse) +func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { + m := new(lbmpb.LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolution for grpclb should provide. The -// name resolver used by grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. - ServerName string -} - // NewGRPCLBBalancer creates a grpclb load balancer. func NewGRPCLBBalancer(r naming.Resolver) Balancer { - return &balancer{ + return &grpclbBalancer{ r: r, } } @@ -131,27 +96,27 @@ type grpclbAddrInfo struct { dropForLoadBalancing bool } -type balancer struct { - r naming.Resolver - target string - mu sync.Mutex - seq int // a sequence number to make sure addrCh does not get stale addresses. - w naming.Watcher - addrCh chan []Address - rbs []remoteBalancerInfo - addrs []*grpclbAddrInfo - next int - waitCh chan struct{} - done bool - expTimer *time.Timer - rand *rand.Rand - - clientStats lbpb.ClientStats +type grpclbBalancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. 
+ w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + rand *rand.Rand + + clientStats lbmpb.ClientStats } -func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { +func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { updates, err := w.Next() if err != nil { + grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) return err } b.mu.Lock() @@ -173,24 +138,24 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn if exist { continue } - md, ok := update.Metadata.(*AddrMetadataGRPCLB) + md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) if !ok { // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Printf("The name resolution contains unexpected metadata %v", update.Metadata) + grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) continue } switch md.AddrType { - case Backend: + case naming.Backend: // TODO: Revisit the handling here and may introduce some fallback mechanism. - grpclog.Printf("The name resolution does not give grpclb addresses") + grpclog.Errorf("The name resolution does not give grpclb addresses") continue - case GRPCLB: + case naming.GRPCLB: b.rbs = append(b.rbs, remoteBalancerInfo{ addr: update.Addr, name: md.ServerName, }) default: - grpclog.Printf("Received unknow address type %d", md.AddrType) + grpclog.Errorf("Received unknow address type %d", md.AddrType) continue } case naming.Delete: @@ -202,7 +167,7 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn } } default: - grpclog.Println("Unknown update.Op ", update.Op) + grpclog.Errorf("Unknown update.Op %v", update.Op) } } // TODO: Fall back to the basic round-robin load balancing if the resulting address is @@ -215,42 +180,33 @@ func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerIn return nil } -func (b *balancer) serverListExpire(seq int) { - b.mu.Lock() - defer b.mu.Unlock() - // TODO: gRPC interanls do not clear the connections when the server list is stale. - // This means RPCs will keep using the existing server list until b receives new - // server list even though the list is expired. Revisit this behavior later. - if b.done || seq < b.seq { - return - } - b.next = 0 - b.addrs = nil - // Ask grpc internals to close all the corresponding connections. - b.addrCh <- nil -} - -func convertDuration(d *lbpb.Duration) time.Duration { +func convertDuration(d *lbmpb.Duration) time.Duration { if d == nil { return 0 } return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond } -func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { +func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) { if l == nil { return } servers := l.GetServers() - expiration := convertDuration(l.GetExpirationInterval()) var ( sl []*grpclbAddrInfo addrs []Address ) for _, s := range servers { md := metadata.Pairs("lb-token", s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. 
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } addr := Address{ - Addr: fmt.Sprintf("%s:%d", net.IP(s.IpAddress), s.Port), + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } sl = append(sl, &grpclbAddrInfo{ @@ -270,20 +226,11 @@ func (b *balancer) processServerList(l *lbpb.ServerList, seq int) { b.next = 0 b.addrs = sl b.addrCh <- addrs - if b.expTimer != nil { - b.expTimer.Stop() - b.expTimer = nil - } - if expiration > 0 { - b.expTimer = time.AfterFunc(expiration, func() { - b.serverListExpire(seq) - }) - } } return } -func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { +func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) { ticker := time.NewTicker(interval) defer ticker.Stop() for { @@ -294,29 +241,30 @@ func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Dura } b.mu.Lock() stats := b.clientStats - b.clientStats = lbpb.ClientStats{} // Clear the stats. + b.clientStats = lbmpb.ClientStats{} // Clear the stats. b.mu.Unlock() t := time.Now() - stats.Timestamp = &lbpb.Timestamp{ + stats.Timestamp = &lbmpb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } - if err := s.Send(&lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + if err := s.Send(&lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{ ClientStats: &stats, }, }); err != nil { + grpclog.Errorf("grpclb: failed to send load report: %v", err) return } } } -func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { +func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := lbc.BalanceLoad(ctx) if err != nil { - grpclog.Printf("Failed to perform RPC to the remote balancer %v", err) + grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) return } b.mu.Lock() @@ -325,37 +273,39 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return } b.mu.Unlock() - initReq := &lbpb.LoadBalanceRequest{ - LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ - InitialRequest: &lbpb.InitialLoadBalanceRequest{ + initReq := &lbmpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbmpb.InitialLoadBalanceRequest{ Name: b.target, }, }, } if err := stream.Send(initReq); err != nil { + grpclog.Errorf("grpclb: failed to send init request: %v", err) // TODO: backoff on retry? return true } reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv init response: %v", err) // TODO: backoff on retry? return true } initResp := reply.GetInitialResponse() if initResp == nil { - grpclog.Println("Failed to receive the initial response from the remote balancer.") + grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") return } // TODO: Support delegation. if initResp.LoadBalancerDelegate != "" { // delegation - grpclog.Println("TODO: Delegation is not supported yet.") + grpclog.Errorf("TODO: Delegation is not supported yet.") return } streamDone := make(chan struct{}) defer close(streamDone) b.mu.Lock() - b.clientStats = lbpb.ClientStats{} // Clear client stats. + b.clientStats = lbmpb.ClientStats{} // Clear client stats. 
b.mu.Unlock() if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { go b.sendLoadReport(stream, d, streamDone) @@ -364,6 +314,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b for { reply, err := stream.Recv() if err != nil { + grpclog.Errorf("grpclb: failed to recv server list: %v", err) break } b.mu.Lock() @@ -381,7 +332,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b return true } -func (b *balancer) Start(target string, config BalancerConfig) error { +func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { b.rand = rand.New(rand.NewSource(time.Now().Unix())) // TODO: Fall back to the basic direct connection if there is no name resolver. if b.r == nil { @@ -397,6 +348,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { w, err := b.r.Resolve(target) if err != nil { b.mu.Unlock() + grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) return err } b.w = w @@ -406,7 +358,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { go func() { for { if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) close(balancerAddrsCh) return } @@ -490,22 +442,32 @@ func (b *balancer) Start(target string, config BalancerConfig) error { cc.Close() } // Talk to the remote load balancer to get the server list. - var err error - creds := config.DialCreds - ccError = make(chan struct{}) - if creds == nil { - cc, err = Dial(rb.addr, WithInsecure()) - } else { + var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { if rb.name != "" { if err := creds.OverrideServerName(rb.name); err != nil { - grpclog.Printf("Failed to override the server name in the credentials: %v", err) + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) continue } } - cc, err = Dial(rb.addr, WithTransportCredentials(creds)) + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. + dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) + } + dopts = append(dopts, WithBlock()) + ccError = make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cc, err = DialContext(ctx, rb.addr, dopts...) 
+ cancel() if err != nil { - grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) close(ccError) continue } @@ -529,7 +491,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error { return nil } -func (b *balancer) down(addr Address, err error) { +func (b *grpclbBalancer) down(addr Address, err error) { b.mu.Lock() defer b.mu.Unlock() for _, a := range b.addrs { @@ -540,7 +502,7 @@ func (b *balancer) down(addr Address, err error) { } } -func (b *balancer) Up(addr Address) func(error) { +func (b *grpclbBalancer) Up(addr Address) func(error) { b.mu.Lock() defer b.mu.Unlock() if b.done { @@ -568,7 +530,7 @@ func (b *balancer) Up(addr Address) func(error) { } } -func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { +func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} b.mu.Lock() if b.done { @@ -638,17 +600,10 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } if !opts.BlockingWait { - if len(b.addrs) == 0 { - b.clientStats.NumCallsFinished++ - b.clientStats.NumCallsFinishedWithClientFailedToSend++ - b.mu.Unlock() - err = Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on b.addrs for a failfast RPC. - addr = b.addrs[b.next].addr - b.next++ + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithClientFailedToSend++ b.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") return } // Wait on b.waitCh for non-failfast RPCs. @@ -725,17 +680,17 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre } } -func (b *balancer) Notify() <-chan []Address { +func (b *grpclbBalancer) Notify() <-chan []Address { return b.addrCh } -func (b *balancer) Close() error { +func (b *grpclbBalancer) Close() error { b.mu.Lock() defer b.mu.Unlock() - b.done = true - if b.expTimer != nil { - b.expTimer.Stop() + if b.done { + return errBalancerClosed } + b.done = true if b.waitCh != nil { close(b.waitCh) } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go new file mode 100644 index 000000000000..aba962840c80 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go @@ -0,0 +1,21 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer +// message and service protobuf definitions. 
+package grpc_lb_v1 diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go similarity index 79% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go index f63941bd803a..f4a27125a4fb 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: grpclb.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/messages/messages.proto /* -Package grpc_lb_v1 is a generated protocol buffer package. +Package messages is a generated protocol buffer package. It is generated from these files: - grpclb.proto + grpc_lb_v1/messages/messages.proto It has these top-level messages: Duration @@ -19,7 +18,7 @@ It has these top-level messages: ServerList Server */ -package grpc_lb_v1 +package messages import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -473,11 +472,6 @@ type ServerList struct { // across more servers. The client should consume the server list in order // unless instructed otherwise via the client_config. Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"` } func (m *ServerList) Reset() { *m = ServerList{} } @@ -492,13 +486,6 @@ func (m *ServerList) GetServers() []*Server { return nil } -func (m *ServerList) GetExpirationInterval() *Duration { - if m != nil { - return m.ExpirationInterval - } - return nil -} - // Contains server information. When none of the [drop_for_*] fields are true, // use the other fields. When drop_for_rate_limiting is true, ignore all other // fields. 
Use drop_for_load_balancing only when it is true and @@ -576,54 +563,53 @@ func init() { proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } -func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, - 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34, - 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a, - 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9, - 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1, - 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92, - 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51, - 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0, - 0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51, - 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7, - 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4, - 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13, - 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67, - 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed, - 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93, - 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f, - 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2, - 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4, - 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd, - 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2, - 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd, - 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71, - 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a, - 0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c, - 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c, - 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0, - 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84, - 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37, - 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5, - 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f, - 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07, - 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71, - 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 
0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f, - 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87, - 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94, - 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56, - 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9, - 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a, - 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e, - 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87, - 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28, - 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70, - 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94, - 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5, - 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff, - 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00, + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, + 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, + 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, + 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, + 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, + 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, + 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, + 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, + 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, + 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, + 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8, + 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, + 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, + 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, + 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, + 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, + 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, + 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf, + 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, + 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, + 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, + 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, + 0x99, 0xd2, 0x4c, 0x73, 
0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, + 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, + 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, + 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, + 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, + 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, + 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, + 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, + 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, + 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, + 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, + 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, + 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, + 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, + 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, + 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, + 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, + 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, + 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, + 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, + 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, + 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, + 0xa6, 0x4a, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto similarity index 71% rename from vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto rename to vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto index a2502fb284a3..2ed04551fad2 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -1,35 +1,21 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. syntax = "proto3"; package grpc.lb.v1; +option go_package = "messages"; message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 @@ -46,7 +32,6 @@ message Duration { } message Timestamp { - // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -59,12 +44,6 @@ message Timestamp { int32 nanos = 2; } -service LoadBalancer { - // Bidirectional rpc to get a list of servers. - rpc BalanceLoad(stream LoadBalanceRequest) - returns (stream LoadBalanceResponse); -} - message LoadBalanceRequest { oneof load_balance_request_type { // This message should be sent on the first request to the load balancer. @@ -142,11 +121,8 @@ message ServerList { // unless instructed otherwise via the client_config. repeated Server servers = 1; - // Indicates the amount of time that the client should consider this server - // list as valid. It may be considered stale after waiting this interval of - // time after receiving the list. If the interval is not positive, the - // client can assume the list is valid until the next list is received. - Duration expiration_interval = 3; + // Was google.protobuf.Duration expiration_interval. + reserved 3; } // Contains server information. When none of the [drop_for_*] fields are true, diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go new file mode 100644 index 000000000000..ebcbe56d8dd8 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/service/service.proto + +/* +Package service is a generated protocol buffer package. 
+ +It is generated from these files: + grpc_lb_v1/service/service.proto + +It has these top-level messages: +*/ +package service + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import grpc_lb_v1 "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for LoadBalancer service + +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*grpc_lb_v1.LoadBalanceRequest) error + Recv() (*grpc_lb_v1.LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *grpc_lb_v1.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*grpc_lb_v1.LoadBalanceResponse, error) { + m := new(grpc_lb_v1.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for LoadBalancer service + +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. 
+ BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*grpc_lb_v1.LoadBalanceResponse) error + Recv() (*grpc_lb_v1.LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *grpc_lb_v1.LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*grpc_lb_v1.LoadBalanceRequest, error) { + m := new(grpc_lb_v1.LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_lb_v1/service/service.proto", +} + +func init() { proto.RegisterFile("grpc_lb_v1/service/service.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 142 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x49, 0x8a, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x85, 0xd1, + 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x20, 0x15, 0x7a, 0x39, 0x49, 0x7a, 0x65, 0x86, + 0x52, 0x4a, 0x48, 0xaa, 0x73, 0x53, 0x8b, 0x8b, 0x13, 0xd3, 0x53, 0x8b, 0xe1, 0x0c, 0x88, 0x7a, + 0xa3, 0x24, 0x2e, 0x1e, 0x9f, 0xfc, 0xc4, 0x14, 0xa7, 0xc4, 0x9c, 0xc4, 0xbc, 0xe4, 0xd4, 0x22, + 0xa1, 0x20, 0x2e, 0x6e, 0x28, 0x1b, 0x24, 0x2c, 0x24, 0xa7, 0x87, 0x30, 0x4f, 0x0f, 0x49, 0x61, + 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x3c, 0x4e, 0xf9, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, + 0x54, 0x0d, 0x46, 0x03, 0x46, 0x27, 0xce, 0x28, 0x76, 0xa8, 0x23, 0x93, 0xd8, 0xc0, 0xb6, 0x1a, + 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x39, 0x4e, 0xb0, 0xf8, 0xc9, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto new file mode 100644 index 000000000000..02c1a8b77a11 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto @@ -0,0 +1,26 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.lb.v1; +option go_package = "service"; + +import "grpc_lb_v1/messages/messages.proto"; + +service LoadBalancer { + // Bidirectional rpc to get a list of servers. 
+ rpc BalanceLoad(stream LoadBalanceRequest) + returns (stream LoadBalanceResponse); +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go b/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go deleted file mode 100644 index 65ce6360c9ad..000000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpclb_server_generated.go +++ /dev/null @@ -1,54 +0,0 @@ -// This file contains the generated server side code. -// It's only used for grpclb testing. - -package grpclb - -import ( - "google.golang.org/grpc" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" -) - -// Server API for LoadBalancer service - -type loadBalancerServer interface { - // Bidirectional rpc to get a list of servers. - BalanceLoad(*loadBalancerBalanceLoadServer) error -} - -func registerLoadBalancerServer(s *grpc.Server, srv loadBalancerServer) { - s.RegisterService( - &grpc.ServiceDesc{ - ServiceName: "grpc.lb.v1.LoadBalancer", - HandlerType: (*loadBalancerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "BalanceLoad", - Handler: balanceLoadHandler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpclb.proto", - }, srv) -} - -func balanceLoadHandler(srv interface{}, stream grpc.ServerStream) error { - return srv.(loadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream -} - -func (x *loadBalancerBalanceLoadServer) Send(m *lbpb.LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*lbpb.LoadBalanceRequest, error) { - m := new(lbpb.LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} diff --git a/vendor/google.golang.org/grpc/grpclb/grpclb_test.go b/vendor/google.golang.org/grpc/grpclb/grpclb_test.go index 29c8909248a0..46c1fe5b9a32 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpclb_test.go +++ b/vendor/google.golang.org/grpc/grpclb/grpclb_test.go @@ -1,44 +1,32 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -package grpclb +//go:generate protoc --go_out=plugins=:. grpc_lb_v1/messages/messages.proto +//go:generate protoc --go_out=Mgrpc_lb_v1/messages/messages.proto=google.golang.org/grpc/grpclb/grpc_lb_v1/messages,plugins=grpc:. grpc_lb_v1/service/service.proto + +// Package grpclb_test is currently used only for grpclb testing. +package grpclb_test import ( "errors" "fmt" "io" "net" - "strconv" "strings" "sync" "testing" @@ -49,16 +37,24 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + lbspb "google.golang.org/grpc/grpclb/grpc_lb_v1/service" + _ "google.golang.org/grpc/grpclog/glogger" "google.golang.org/grpc/metadata" "google.golang.org/grpc/naming" testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" ) var ( lbsn = "bar.com" besn = "foo.com" lbToken = "iamatoken" + + // Resolver replaces localhost with fakeName in Next(). + // Dialer replaces fakeName with localhost when dialing. + // This will test that custom dialer is passed from Dial to grpclb. + fakeName = "fake.Name" ) type testWatcher struct { @@ -81,6 +77,9 @@ func (w *testWatcher) Next() (updates []*naming.Update, err error) { break } if u != nil { + // Resolver replaces localhost with fakeName in Next(). + // Custom dialer will replace fakeName with localhost when dialing. + u.Addr = strings.Replace(u.Addr, "localhost", fakeName, 1) updates = append(updates, u) } } @@ -89,6 +88,7 @@ func (w *testWatcher) Next() (updates []*naming.Update, err error) { } func (w *testWatcher) Close() { + close(w.side) } // Inject naming resolution updates to the testWatcher. 
@@ -116,8 +116,8 @@ func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { r.w.update <- &naming.Update{ Op: naming.Add, Addr: addr, - Metadata: &grpc.AddrMetadataGRPCLB{ - AddrType: grpc.GRPCLB, + Metadata: &naming.AddrMetadataGRPCLB{ + AddrType: naming.GRPCLB, ServerName: lbsn, }, } @@ -135,8 +135,9 @@ func (r *testNameResolver) inject(updates []*naming.Update) { } type serverNameCheckCreds struct { - expected string + mu sync.Mutex sn string + expected string } func (c *serverNameCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { @@ -147,6 +148,8 @@ func (c *serverNameCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, cred return rawConn, nil, nil } func (c *serverNameCheckCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() b := make([]byte, len(c.expected)) if _, err := rawConn.Read(b); err != nil { fmt.Printf("Failed to read the server name from the server %v", err) @@ -159,28 +162,41 @@ func (c *serverNameCheckCreds) ClientHandshake(ctx context.Context, addr string, return rawConn, nil, nil } func (c *serverNameCheckCreds) Info() credentials.ProtocolInfo { + c.mu.Lock() + defer c.mu.Unlock() return credentials.ProtocolInfo{} } func (c *serverNameCheckCreds) Clone() credentials.TransportCredentials { + c.mu.Lock() + defer c.mu.Unlock() return &serverNameCheckCreds{ expected: c.expected, } } func (c *serverNameCheckCreds) OverrideServerName(s string) error { + c.mu.Lock() + defer c.mu.Unlock() c.expected = s return nil } +// fakeNameDialer replaces fakeName with localhost when dialing. +// This will test that custom dialer is passed from Dial to grpclb. +func fakeNameDialer(addr string, timeout time.Duration) (net.Conn, error) { + addr = strings.Replace(addr, fakeName, "localhost", 1) + return net.DialTimeout("tcp", addr, timeout) +} + type remoteBalancer struct { - sls []*lbpb.ServerList + sls []*lbmpb.ServerList intervals []time.Duration statsDura time.Duration done chan struct{} mu sync.Mutex - stats lbpb.ClientStats + stats lbmpb.ClientStats } -func newRemoteBalancer(sls []*lbpb.ServerList, intervals []time.Duration) *remoteBalancer { +func newRemoteBalancer(sls []*lbmpb.ServerList, intervals []time.Duration) *remoteBalancer { return &remoteBalancer{ sls: sls, intervals: intervals, @@ -192,7 +208,7 @@ func (b *remoteBalancer) stop() { close(b.done) } -func (b *remoteBalancer) BalanceLoad(stream *loadBalancerBalanceLoadServer) error { +func (b *remoteBalancer) BalanceLoad(stream lbspb.LoadBalancer_BalanceLoadServer) error { req, err := stream.Recv() if err != nil { return err @@ -201,10 +217,10 @@ func (b *remoteBalancer) BalanceLoad(stream *loadBalancerBalanceLoadServer) erro if initReq.Name != besn { return grpc.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name) } - resp := &lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ - InitialResponse: &lbpb.InitialLoadBalanceResponse{ - ClientStatsReportInterval: &lbpb.Duration{ + resp := &lbmpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbmpb.LoadBalanceResponse_InitialResponse{ + InitialResponse: &lbmpb.InitialLoadBalanceResponse{ + ClientStatsReportInterval: &lbmpb.Duration{ Seconds: int64(b.statsDura.Seconds()), Nanos: int32(b.statsDura.Nanoseconds() - int64(b.statsDura.Seconds())*1e9), }, @@ -217,7 +233,7 @@ func (b *remoteBalancer) BalanceLoad(stream *loadBalancerBalanceLoadServer) erro go func() { 
for { var ( - req *lbpb.LoadBalanceRequest + req *lbmpb.LoadBalanceRequest err error ) if req, err = stream.Recv(); err != nil { @@ -235,8 +251,8 @@ func (b *remoteBalancer) BalanceLoad(stream *loadBalancerBalanceLoadServer) erro }() for k, v := range b.sls { time.Sleep(b.intervals[k]) - resp = &lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ + resp = &lbmpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbmpb.LoadBalanceResponse_ServerList{ ServerList: v, }, } @@ -317,10 +333,7 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er return } beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP) - - beAddr := strings.Split(beLis.Addr().String(), ":") - bePort, _ := strconv.Atoi(beAddr[1]) - bePorts = append(bePorts, bePort) + bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port) beListeners = append(beListeners, beLis) } @@ -341,7 +354,7 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er return } ls = newRemoteBalancer(nil, nil) - registerLoadBalancerServer(lb, ls) + lbspb.RegisterLoadBalancerServer(lb, ls) go func() { lb.Serve(lbLis) }() @@ -364,50 +377,52 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er } func TestGRPCLB(t *testing.T) { + defer leakcheck.Check(t) tss, cleanup, err := newLoadBalancer(1) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ + be := &lbmpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } - var bes []*lbpb.Server + var bes []*lbmpb.Server bes = append(bes, be) - sl := &lbpb.ServerList{ + sl := &lbmpb.ServerList{ Servers: bes, } - tss.ls.sls = []*lbpb.ServerList{sl} + tss.ls.sls = []*lbmpb.ServerList{sl} tss.ls.intervals = []time.Duration{0} creds := serverNameCheckCreds{ expected: besn, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{ - addrs: []string{tss.lbAddr}, - })), grpc.WithBlock(), grpc.WithTransportCredentials(&creds)) + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } + defer cc.Close() testC := testpb.NewTestServiceClient(cc) - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } - cc.Close() } func TestDropRequest(t *testing.T) { + defer leakcheck.Check(t) tss, cleanup, err := newLoadBalancer(2) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - tss.ls.sls = []*lbpb.ServerList{{ - Servers: []*lbpb.Server{{ + tss.ls.sls = []*lbmpb.ServerList{{ + Servers: []*lbmpb.Server{{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, @@ -425,13 +440,23 @@ func TestDropRequest(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{ - addrs: []string{tss.lbAddr}, - })), grpc.WithBlock(), grpc.WithTransportCredentials(&creds)) + 
cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } + defer cc.Close() testC := testpb.NewTestServiceClient(cc) + // Wait until the first connection is up. + // The first one has Drop set to true, error should contain "drop requests". + for { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + if strings.Contains(err.Error(), "drops requests") { + break + } + } + } // The 1st, non-fail-fast RPC should succeed. This ensures both server // connections are made, because the first one has DropForLoadBalancing set to true. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { @@ -449,110 +474,51 @@ func TestDropRequest(t *testing.T) { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } - cc.Close() } func TestDropRequestFailedNonFailFast(t *testing.T) { + defer leakcheck.Check(t) tss, cleanup, err := newLoadBalancer(1) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ + be := &lbmpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, DropForLoadBalancing: true, } - var bes []*lbpb.Server + var bes []*lbmpb.Server bes = append(bes, be) - sl := &lbpb.ServerList{ + sl := &lbmpb.ServerList{ Servers: bes, } - tss.ls.sls = []*lbpb.ServerList{sl} + tss.ls.sls = []*lbmpb.ServerList{sl} tss.ls.intervals = []time.Duration{0} creds := serverNameCheckCreds{ expected: besn, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{ - addrs: []string{tss.lbAddr}, - })), grpc.WithBlock(), grpc.WithTransportCredentials(&creds)) + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } + defer cc.Close() testC := testpb.NewTestServiceClient(cc) ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.DeadlineExceeded) } - cc.Close() -} - -func TestServerExpiration(t *testing.T) { - tss, cleanup, err := newLoadBalancer(1) - if err != nil { - t.Fatalf("failed to create new load balancer: %v", err) - } - defer cleanup() - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - exp := &lbpb.Duration{ - Seconds: 0, - Nanos: 100000000, // 100ms - } - var sls []*lbpb.ServerList - sl := &lbpb.ServerList{ - Servers: bes, - ExpirationInterval: exp, - } - sls = append(sls, sl) - sl = &lbpb.ServerList{ - Servers: bes, - } - sls = append(sls, sl) - var intervals []time.Duration - intervals = append(intervals, 0) - intervals = append(intervals, 500*time.Millisecond) - tss.ls.sls = sls - tss.ls.intervals = intervals - creds := serverNameCheckCreds{ - expected: besn, - } - ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{ - addrs: []string{tss.lbAddr}, - })), grpc.WithBlock(), grpc.WithTransportCredentials(&creds)) - if err != nil { - t.Fatalf("Failed to dial to the backend %v", err) - } - testC := testpb.NewTestServiceClient(cc) - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - // Sleep and wake up when the first server list gets expired. - time.Sleep(150 * time.Millisecond) - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable) - } - // A non-failfast rpc should be succeeded after the second server list is received from - // the remote load balancer. - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - cc.Close() } // When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. func TestBalancerDisconnects(t *testing.T) { + defer leakcheck.Check(t) var ( lbAddrs []string lbs []*grpc.Server @@ -564,17 +530,17 @@ func TestBalancerDisconnects(t *testing.T) { } defer cleanup() - be := &lbpb.Server{ + be := &lbmpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } - var bes []*lbpb.Server + var bes []*lbmpb.Server bes = append(bes, be) - sl := &lbpb.ServerList{ + sl := &lbmpb.ServerList{ Servers: bes, } - tss.ls.sls = []*lbpb.ServerList{sl} + tss.ls.sls = []*lbmpb.ServerList{sl} tss.ls.intervals = []time.Duration{0} lbAddrs = append(lbAddrs, tss.lbAddr) @@ -589,14 +555,17 @@ func TestBalancerDisconnects(t *testing.T) { resolver := &testNameResolver{ addrs: lbAddrs[:2], } - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(resolver)), grpc.WithBlock(), grpc.WithTransportCredentials(&creds)) + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(resolver)), + grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } + defer cc.Close() testC := testpb.NewTestServiceClient(cc) var previousTrailer string trailer := metadata.MD{} - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer)); err != nil { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } else { previousTrailer = trailer[testmdkey][0] @@ -605,7 +574,7 @@ func TestBalancerDisconnects(t *testing.T) { // When lbs[0] is stopped, lbs[1] should be used. lbs[0].Stop() for { - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer)); err != nil { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } else if trailer[testmdkey][0] != previousTrailer { // A new backend server should receive the request. 
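[editor's note] The grpclb test hunks above consistently add grpc.WithBlock plus grpc.WithDialer(fakeNameDialer) to DialContext and switch the first EmptyCall to grpc.FailFast(false), so the RPC waits for a ready backend instead of failing while the balancer is still connecting. A minimal, self-contained sketch of that wait-for-ready dial/call pattern follows; the address and the use of the standard health-check service as the target RPC are placeholders for illustration, not the test helpers (testNameResolver, fakeNameDialer, EmptyCall) used in the vendored tests.

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Bound both the dial and the RPC so a slow or partitioned balancer
	// cannot hang the client indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// WithBlock makes DialContext return only once the connection is ready
	// (or the context expires).
	cc, err := grpc.DialContext(ctx, "localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()

	// FailFast(false) queues the RPC until a backend connection is ready
	// instead of returning Unavailable immediately.
	resp, err := healthpb.NewHealthClient(cc).Check(ctx,
		&healthpb.HealthCheckRequest{Service: ""}, grpc.FailFast(false))
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	log.Printf("serving status: %v", resp.Status)
}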
@@ -619,8 +588,8 @@ func TestBalancerDisconnects(t *testing.T) { resolver.inject([]*naming.Update{ {Op: naming.Add, Addr: lbAddrs[2], - Metadata: &grpc.AddrMetadataGRPCLB{ - AddrType: grpc.GRPCLB, + Metadata: &naming.AddrMetadataGRPCLB{ + AddrType: naming.GRPCLB, ServerName: lbsn, }, }, @@ -628,7 +597,7 @@ func TestBalancerDisconnects(t *testing.T) { // Stop lbs[1]. Now lbs[0] and lbs[1] are all stopped. lbs[2] should be used. lbs[1].Stop() for { - if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer)); err != nil { + if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } else if trailer[testmdkey][0] != previousTrailer { // A new backend server should receive the request. @@ -637,7 +606,6 @@ func TestBalancerDisconnects(t *testing.T) { } time.Sleep(100 * time.Millisecond) } - cc.Close() } type failPreRPCCred struct{} @@ -653,21 +621,21 @@ func (failPreRPCCred) RequireTransportSecurity() bool { return false } -func checkStats(stats *lbpb.ClientStats, expected *lbpb.ClientStats) error { +func checkStats(stats *lbmpb.ClientStats, expected *lbmpb.ClientStats) error { if !proto.Equal(stats, expected) { return fmt.Errorf("stats not equal: got %+v, want %+v", stats, expected) } return nil } -func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool, runRPCs func(*grpc.ClientConn)) lbpb.ClientStats { +func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool, runRPCs func(*grpc.ClientConn)) lbmpb.ClientStats { tss, cleanup, err := newLoadBalancer(3) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - tss.ls.sls = []*lbpb.ServerList{{ - Servers: []*lbpb.Server{{ + tss.ls.sls = []*lbmpb.ServerList{{ + Servers: []*lbmpb.Server{{ IpAddress: tss.beIPs[2], Port: int32(tss.bePorts[2]), LoadBalanceToken: lbToken, @@ -681,9 +649,10 @@ func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, besn, grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{ - addrs: []string{tss.lbAddr}, - })), grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{})) + cc, err := grpc.DialContext(ctx, besn, + grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})), + grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{}), + grpc.WithBlock(), grpc.WithDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } @@ -700,6 +669,7 @@ func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool const countRPC = 40 func TestGRPCLBStatsUnarySuccess(t *testing.T) { + defer leakcheck.Check(t) stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. 
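[editor's note] checkStats above asserts on the grpc.lb.v1 ClientStats counters by comparing the whole protobuf message with proto.Equal rather than checking fields one by one. The sketch below shows that comparison idiom in isolation; the health-check request type and the values are stand-ins chosen only to keep the example self-contained, not the load-balancer stats types used in the patch.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// checkEqual mirrors the shape of checkStats: compare two protobuf messages
// as a whole and return a descriptive error when they differ.
func checkEqual(got, want proto.Message) error {
	if !proto.Equal(got, want) {
		return fmt.Errorf("not equal: got %+v, want %+v", got, want)
	}
	return nil
}

func main() {
	got := &healthpb.HealthCheckRequest{Service: "etcdserver"}
	want := &healthpb.HealthCheckRequest{Service: "etcdserver"}
	if err := checkEqual(got, want); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("messages match")
}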
@@ -711,7 +681,7 @@ func TestGRPCLBStatsUnarySuccess(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC), NumCallsFinished: int64(countRPC), NumCallsFinishedKnownReceived: int64(countRPC), @@ -721,6 +691,7 @@ func TestGRPCLBStatsUnarySuccess(t *testing.T) { } func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) { + defer leakcheck.Check(t) c := 0 stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) @@ -737,7 +708,7 @@ func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC + c), NumCallsFinished: int64(countRPC + c), NumCallsFinishedWithDropForLoadBalancing: int64(countRPC + 1), @@ -748,6 +719,7 @@ func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) { } func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) { + defer leakcheck.Check(t) c := 0 stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) @@ -764,7 +736,7 @@ func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC + c), NumCallsFinished: int64(countRPC + c), NumCallsFinishedWithDropForRateLimiting: int64(countRPC + 1), @@ -775,6 +747,7 @@ func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) { } func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { + defer leakcheck.Check(t) stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. @@ -786,7 +759,7 @@ func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC), NumCallsFinished: int64(countRPC), NumCallsFinishedWithClientFailedToSend: int64(countRPC - 1), @@ -797,6 +770,7 @@ func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { } func TestGRPCLBStatsStreamingSuccess(t *testing.T) { + defer leakcheck.Check(t) stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. 
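[editor's note] Every stats test in these hunks now begins with defer leakcheck.Check(t). The real helper lives in gRPC's internal test utilities and inspects goroutine stacks; the rough stand-in below only conveys the idea (poll the goroutine count until a deadline and fail if it never drops back to the baseline) and is explicitly not the vendored implementation.

package leakcheck

import (
	"runtime"
	"testing"
	"time"
)

// baseline is the goroutine count observed when the package is initialized.
var baseline = runtime.NumGoroutine()

// Check waits briefly for goroutines started during a test to exit and fails
// the test if the count never returns to the baseline.
func Check(t *testing.T) {
	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) {
		if runtime.NumGoroutine() <= baseline {
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Errorf("leaked goroutines: %d running, baseline was %d", runtime.NumGoroutine(), baseline)
}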
@@ -822,7 +796,7 @@ func TestGRPCLBStatsStreamingSuccess(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC), NumCallsFinished: int64(countRPC), NumCallsFinishedKnownReceived: int64(countRPC), @@ -832,6 +806,7 @@ func TestGRPCLBStatsStreamingSuccess(t *testing.T) { } func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) { + defer leakcheck.Check(t) c := 0 stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) @@ -848,7 +823,7 @@ func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC + c), NumCallsFinished: int64(countRPC + c), NumCallsFinishedWithDropForLoadBalancing: int64(countRPC + 1), @@ -859,6 +834,7 @@ func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) { } func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) { + defer leakcheck.Check(t) c := 0 stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) @@ -875,7 +851,7 @@ func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC + c), NumCallsFinished: int64(countRPC + c), NumCallsFinishedWithDropForRateLimiting: int64(countRPC + 1), @@ -886,6 +862,7 @@ func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) { } func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { + defer leakcheck.Check(t) stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. @@ -903,7 +880,7 @@ func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { } }) - if err := checkStats(&stats, &lbpb.ClientStats{ + if err := checkStats(&stats, &lbmpb.ClientStats{ NumCallsStarted: int64(countRPC), NumCallsFinished: int64(countRPC), NumCallsFinishedWithClientFailedToSend: int64(countRPC - 1), diff --git a/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go index 43b886cdbddc..e5498f823a12 100644 --- a/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go +++ b/vendor/google.golang.org/grpc/grpclog/glogger/glogger.go @@ -1,39 +1,23 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package glogger defines glog-based logging for grpc. -*/ +// Package glogger defines glog-based logging for grpc. +// Importing this package will install glog as the logger used by grpclog. package glogger import ( @@ -44,31 +28,59 @@ import ( ) func init() { - grpclog.SetLogger(&glogger{}) + grpclog.SetLoggerV2(&glogger{}) } type glogger struct{} -func (g *glogger) Fatal(args ...interface{}) { - glog.FatalDepth(2, args...) +func (g *glogger) Info(args ...interface{}) { + glog.InfoDepth(2, args...) } -func (g *glogger) Fatalf(format string, args ...interface{}) { - glog.FatalDepth(2, fmt.Sprintf(format, args...)) +func (g *glogger) Infoln(args ...interface{}) { + glog.InfoDepth(2, fmt.Sprintln(args...)) } -func (g *glogger) Fatalln(args ...interface{}) { - glog.FatalDepth(2, fmt.Sprintln(args...)) +func (g *glogger) Infof(format string, args ...interface{}) { + glog.InfoDepth(2, fmt.Sprintf(format, args...)) } -func (g *glogger) Print(args ...interface{}) { - glog.InfoDepth(2, args...) +func (g *glogger) Warning(args ...interface{}) { + glog.WarningDepth(2, args...) } -func (g *glogger) Printf(format string, args ...interface{}) { - glog.InfoDepth(2, fmt.Sprintf(format, args...)) +func (g *glogger) Warningln(args ...interface{}) { + glog.WarningDepth(2, fmt.Sprintln(args...)) } -func (g *glogger) Println(args ...interface{}) { - glog.InfoDepth(2, fmt.Sprintln(args...)) +func (g *glogger) Warningf(format string, args ...interface{}) { + glog.WarningDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) Error(args ...interface{}) { + glog.ErrorDepth(2, args...) +} + +func (g *glogger) Errorln(args ...interface{}) { + glog.ErrorDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Errorf(format string, args ...interface{}) { + glog.ErrorDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) Fatal(args ...interface{}) { + glog.FatalDepth(2, args...) 
+} + +func (g *glogger) Fatalln(args ...interface{}) { + glog.FatalDepth(2, fmt.Sprintln(args...)) +} + +func (g *glogger) Fatalf(format string, args ...interface{}) { + glog.FatalDepth(2, fmt.Sprintf(format, args...)) +} + +func (g *glogger) V(l int) bool { + return bool(glog.V(glog.Level(l))) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000000..16a7d88867e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calles os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. 
+ os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 3b2933079e9c..d03b2397bfab 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -1,52 +1,25 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package grpclog defines logging for grpc. -*/ -package grpclog // import "google.golang.org/grpc/grpclog" - -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. 
-var logger Logger = log.New(os.Stderr, "", log.LstdFlags) +package grpclog // Logger mimics golang's standard Logger as an interface. +// Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) @@ -58,36 +31,53 @@ type Logger interface { // SetLogger sets the logger that is used in grpc. Call only from // init() functions. +// Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = l + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) } -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. -func Fatal(args ...interface{}) { - logger.Fatal(args...) +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) } -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) } -// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) } -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) } -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) } -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000000..d4932577695f --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. 
Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
+ m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go b/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go new file mode 100644 index 000000000000..756f215f9c86 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2_test.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclog + +import ( + "bytes" + "fmt" + "regexp" + "testing" +) + +func TestLoggerV2Severity(t *testing.T) { + buffers := []*bytes.Buffer{new(bytes.Buffer), new(bytes.Buffer), new(bytes.Buffer)} + SetLoggerV2(NewLoggerV2(buffers[infoLog], buffers[warningLog], buffers[errorLog])) + + Info(severityName[infoLog]) + Warning(severityName[warningLog]) + Error(severityName[errorLog]) + + for i := 0; i < fatalLog; i++ { + buf := buffers[i] + // The content of info buffer should be something like: + // INFO: 2017/04/07 14:55:42 INFO + // WARNING: 2017/04/07 14:55:42 WARNING + // ERROR: 2017/04/07 14:55:42 ERROR + for j := i; j < fatalLog; j++ { + b, err := buf.ReadBytes('\n') + if err != nil { + t.Fatal(err) + } + if err := checkLogForSeverity(j, b); err != nil { + t.Fatal(err) + } + } + } +} + +// check if b is in the format of: +// WARNING: 2017/04/07 14:55:42 WARNING +func checkLogForSeverity(s int, b []byte) error { + expected := regexp.MustCompile(fmt.Sprintf(`^%s: [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s\n$`, severityName[s], severityName[s])) + if m := expected.Match(b); !m { + return fmt.Errorf("got: %v, want string in format of: %v", string(b), severityName[s]+": 2016/10/05 17:09:26 "+severityName[s]) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 89c4d459f0a2..fdcbb9e0b7db 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: health.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto /* Package grpc_health_v1 is a generated protocol buffer package. 
It is generated from these files: - health.proto + grpc_health_v1/health.proto It has these top-level messages: HealthCheckRequest @@ -69,6 +68,13 @@ func (m *HealthCheckRequest) String() string { return proto.CompactTe func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + type HealthCheckResponse struct { Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` } @@ -78,6 +84,13 @@ func (m *HealthCheckResponse) String() string { return proto.CompactT func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + func init() { proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") @@ -153,24 +166,25 @@ var _Health_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "health.proto", + Metadata: "grpc_health_v1/health.proto", } -func init() { proto.RegisterFile("health.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, - 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, - 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, - 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, - 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, - 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, - 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, - 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, - 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, - 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, - 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, - 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, - 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 
0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto index e2dc0889258d..6072fdc3b801 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; package grpc.health.v1; diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go index 342552986e2a..c6212f406f7b 100644 --- a/vendor/google.golang.org/grpc/health/health.go +++ b/vendor/google.golang.org/grpc/health/health.go @@ -1,3 +1,23 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto + // Package health provides some utility functions to health-check a server. The implementation // is based on protobuf. Users need to write their own implementations if other IDLs are used. package health diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index a6921614572b..06dc825b9fba 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,15 +27,15 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// This is an EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is the EXPERIMENTAL API. +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5489143a85c1..07083832c3c6 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -1,32 +1,17 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/interop/client/client.go b/vendor/google.golang.org/grpc/interop/client/client.go index 38bad3f66db6..da6bc2d20e9e 100644 --- a/vendor/google.golang.org/grpc/interop/client/client.go +++ b/vendor/google.golang.org/grpc/interop/client/client.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -44,9 +29,11 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/testdata" ) var ( + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") @@ -75,9 +62,6 @@ var ( custom_metadata: server will echo custom metadata; unimplemented_method: client attempts to call unimplemented method; unimplemented_service: client attempts to call unimplemented service.`) - - // The test CA root cert file - testCAFile = "testdata/ca.pem" ) func main() { @@ -92,7 +76,10 @@ func main() { var creds credentials.TransportCredentials if *testCA { var err error - creds, err = credentials.NewClientTLSFromFile(testCAFile, sn) + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } @@ -120,6 +107,7 @@ func main() { } else { opts = append(opts, grpc.WithInsecure()) } + opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { grpclog.Fatalf("Fail to dial: %v", err) diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem b/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/interop/client/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/server1.key b/vendor/google.golang.org/grpc/interop/client/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/interop/client/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem b/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/interop/client/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G 
-CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go old mode 100755 new mode 100644 index 76ae5643abf3..b03c728c3c20 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - test.proto + grpc_testing/test.proto It has these top-level messages: Empty @@ -789,7 +788,7 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "test.proto", + Metadata: "grpc_testing/test.proto", } // Client API for UnimplementedService service @@ -855,52 +854,52 @@ var _UnimplementedService_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: fileDescriptor0, + Metadata: "grpc_testing/test.proto", } -func init() { proto.RegisterFile("test.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 649 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x54, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xc5, 0x69, 0x42, 0xda, 0x49, 0x6a, 0xc2, 0x94, 0x0a, 0x37, 0x45, 0x22, 0x32, 0x07, 0x0c, - 0x12, 0x01, 0x45, 0x82, 0x03, 0x12, 0xa0, 0xd2, 0xa6, 0xa2, 0x52, 0xdb, 0x14, 0xbb, 0x39, 0x47, - 0x4b, 0x32, 0x75, 0x2d, 0xf9, 0x0b, 0x7b, 0x5d, 0x91, 0x1e, 0xf8, 0x33, 0xfc, 0x08, 0x0e, 0xfc, - 0x39, 0xb4, 0x6b, 0x3b, 0x71, 0xd2, 0x54, 0x34, 0x7c, 0xdd, 0x76, 0xdf, 0xbe, 0xf9, 0x78, 0x33, - 0xcf, 0x06, 0xe0, 0x14, 0xf3, 0x76, 0x18, 0x05, 0x3c, 0xc0, 0xba, 0x1d, 0x85, 0xc3, 0xb6, 0x00, - 0x1c, 0xdf, 0xd6, 0xab, 0x50, 0xe9, 0x7a, 0x21, 0x1f, 0xeb, 0x87, 0x50, 0x3d, 0x61, 0x63, 0x37, - 0x60, 0x23, 0x7c, 0x06, 0x65, 0x3e, 0x0e, 0x49, 0x53, 0x5a, 0x8a, 0xa1, 0x76, 0xb6, 0xda, 0xc5, - 0x80, 0x76, 0x46, 0x3a, 0x1d, 0x87, 0x64, 0x4a, 0x1a, 0x22, 0x94, 0x3f, 0x05, 0xa3, 0xb1, 0x56, - 0x6a, 0x29, 0x46, 0xdd, 0x94, 0x67, 0xfd, 0x35, 0x40, 0x77, 0x78, 0x1e, 0x58, 0x9c, 0xf1, 0x24, - 0x16, 0x8c, 0x61, 0x30, 0x4a, 0x13, 0x56, 0x4c, 0x79, 0x46, 0x0d, 0xaa, 0x1e, 0xc5, 0x31, 0xb3, - 0x49, 0x06, 0xae, 0x99, 0xf9, 0x55, 0xff, 0x5e, 0x82, 0x75, 0xcb, 0xf1, 0x42, 0x97, 0x4c, 0xfa, - 0x9c, 0x50, 0xcc, 0xf1, 0x2d, 0xac, 0x47, 0x14, 0x87, 0x81, 0x1f, 0xd3, 0xe0, 0x66, 0x9d, 0xd5, - 0x73, 0xbe, 0xb8, 0xe1, 0xa3, 0x42, 0x7c, 0xec, 0x5c, 0xa6, 0x15, 0x2b, 0x53, 0x92, 0xe5, 0x5c, - 0x12, 0x3e, 0x87, 0x6a, 0x98, 0x66, 0xd0, 0x56, 0x5a, 0x8a, 0x51, 0xeb, 0x6c, 0x2e, 0x4c, 0x6f, - 0xe6, 0x2c, 0x91, 0xf5, 0xcc, 0x71, 0xdd, 0x41, 0x12, 0x53, 0xe4, 0x33, 0x8f, 0xb4, 0x72, 0x4b, - 0x31, 0x56, 0xcd, 0xba, 0x00, 0xfb, 0x19, 0x86, 0x06, 0x34, 0x24, 0x29, 0x60, 0x09, 0x3f, 0x1f, - 0xc4, 0xc3, 0x20, 0x24, 0xad, 0x22, 0x79, 0xaa, 0xc0, 0x7b, 0x02, 0xb6, 0x04, 0x8a, 0x3b, 0x70, - 0x67, 0xda, 0xa4, 0x9c, 0x9b, 0x56, 0x95, 0x7d, 0x68, 0xb3, 0x7d, 0x4c, 0xe7, 0x6a, 0xaa, 0x13, - 0x01, 0xf2, 0xae, 0x7f, 0x05, 0x35, 0x1f, 0x5c, 0x8a, 0x17, 0x45, 0x29, 0x37, 0x12, 0xd5, 0x84, - 0xd5, 0x89, 0x9e, 0x74, 0x2f, 0x93, 0x3b, 0x3e, 
0x84, 0x5a, 0x51, 0xc6, 0x8a, 0x7c, 0x86, 0x60, - 0x22, 0x41, 0x3f, 0x84, 0x2d, 0x8b, 0x47, 0xc4, 0x3c, 0xc7, 0xb7, 0x0f, 0xfc, 0x30, 0xe1, 0xbb, - 0xcc, 0x75, 0xf3, 0x25, 0x2e, 0xdb, 0x8a, 0x7e, 0x0a, 0xcd, 0x45, 0xd9, 0x32, 0x65, 0xaf, 0xe0, - 0x3e, 0xb3, 0xed, 0x88, 0x6c, 0xc6, 0x69, 0x34, 0xc8, 0x62, 0xd2, 0xed, 0xa6, 0x36, 0xdb, 0x9c, - 0x3e, 0x67, 0xa9, 0xc5, 0x9a, 0xf5, 0x03, 0xc0, 0x3c, 0xc7, 0x09, 0x8b, 0x98, 0x47, 0x9c, 0x22, - 0xe9, 0xd0, 0x42, 0xa8, 0x3c, 0x0b, 0xb9, 0x8e, 0xcf, 0x29, 0xba, 0x60, 0x62, 0xc7, 0x99, 0x67, - 0x20, 0x87, 0xfa, 0xb1, 0xfe, 0xad, 0x54, 0xe8, 0xb0, 0x97, 0xf0, 0x39, 0xc1, 0x7f, 0xea, 0xda, - 0x8f, 0xb0, 0x31, 0x89, 0x0f, 0x27, 0xad, 0x6a, 0xa5, 0xd6, 0x8a, 0x51, 0xeb, 0xb4, 0x66, 0xb3, - 0x5c, 0x95, 0x64, 0x62, 0x74, 0x55, 0xe6, 0xd2, 0x1e, 0xff, 0x0b, 0xa6, 0x3c, 0x86, 0xed, 0x85, - 0x43, 0xfa, 0x4d, 0x87, 0x3e, 0x7d, 0x07, 0xb5, 0xc2, 0xcc, 0xb0, 0x01, 0xf5, 0xdd, 0xde, 0xd1, - 0x89, 0xd9, 0xb5, 0xac, 0x9d, 0xf7, 0x87, 0xdd, 0xc6, 0x2d, 0x44, 0x50, 0xfb, 0xc7, 0x33, 0x98, - 0x82, 0x00, 0xb7, 0xcd, 0x9d, 0xe3, 0xbd, 0xde, 0x51, 0xa3, 0xd4, 0xf9, 0x51, 0x86, 0xda, 0x29, - 0xc5, 0xdc, 0xa2, 0xe8, 0xc2, 0x19, 0x12, 0xbe, 0x84, 0x35, 0xf9, 0x0b, 0x14, 0x6d, 0xe1, 0xc6, - 0x9c, 0x2e, 0xf1, 0xd0, 0x5c, 0x04, 0xe2, 0x3e, 0xac, 0xf5, 0x7d, 0x16, 0xa5, 0x61, 0xdb, 0xb3, - 0x8c, 0x99, 0xdf, 0x57, 0xf3, 0xc1, 0xe2, 0xc7, 0x6c, 0x00, 0x2e, 0x6c, 0x2c, 0x98, 0x0f, 0x1a, - 0x73, 0x41, 0xd7, 0xfa, 0xac, 0xf9, 0xe4, 0x06, 0xcc, 0xb4, 0xd6, 0x0b, 0x05, 0x1d, 0xc0, 0xab, - 0x1f, 0x15, 0x3e, 0xbe, 0x26, 0xc5, 0xfc, 0x47, 0xdc, 0x34, 0x7e, 0x4d, 0x4c, 0x4b, 0x19, 0xa2, - 0x94, 0xba, 0x9f, 0xb8, 0xee, 0x5e, 0x12, 0xba, 0xf4, 0xe5, 0x9f, 0x69, 0x32, 0x14, 0xa9, 0x4a, - 0xfd, 0xc0, 0xdc, 0xb3, 0xff, 0x50, 0xaa, 0xd3, 0x87, 0x7b, 0x7d, 0x5f, 0x6e, 0xd0, 0x23, 0x9f, - 0xd3, 0x28, 0x77, 0xd1, 0x1b, 0xb8, 0x3b, 0x83, 0x2f, 0xe7, 0xa6, 0x9f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xdd, 0xb5, 0x50, 0x6f, 0xa2, 0x07, 0x00, 0x00, + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0x69, 0x42, 0xda, 0x49, 0x6a, 0xc2, 0x94, 0xaa, 0x6e, 0x8a, 0x44, 0x64, 0x0e, 0x18, + 0x24, 0x52, 0x14, 0x09, 0x0e, 0x48, 0x80, 0x4a, 0x9b, 0x8a, 0x4a, 0x6d, 0x53, 0xec, 0xe6, 0x1c, + 0x2d, 0xc9, 0xd4, 0xb5, 0xe4, 0x2f, 0xec, 0x75, 0x45, 0x7a, 0xe0, 0xcf, 0xf0, 0x23, 0x38, 0xf0, + 0xe7, 0xd0, 0xae, 0xed, 0xc4, 0x49, 0x53, 0xd1, 0xf2, 0x75, 0xca, 0xee, 0x9b, 0x37, 0xb3, 0xf3, + 0x66, 0x5e, 0x0c, 0x1b, 0x76, 0x14, 0x0e, 0x07, 0x9c, 0x62, 0xee, 0xf8, 0xf6, 0xb6, 0xf8, 0x6d, + 0x87, 0x51, 0xc0, 0x03, 0xac, 0x8b, 0x40, 0x3b, 0x0b, 0xe8, 0x55, 0xa8, 0x74, 0xbd, 0x90, 0x8f, + 0xf5, 0x43, 0xa8, 0x9e, 0xb0, 0xb1, 0x1b, 0xb0, 0x11, 0x3e, 0x87, 0x32, 0x1f, 0x87, 0xa4, 0x29, + 0x2d, 0xc5, 0x50, 0x3b, 0x9b, 0xed, 0x62, 0x42, 0x3b, 0x23, 0x9d, 0x8e, 0x43, 0x32, 0x25, 0x0d, + 0x11, 0xca, 0x9f, 0x82, 0xd1, 0x58, 0x2b, 0xb5, 0x14, 0xa3, 0x6e, 0xca, 0xb3, 0xfe, 0x1a, 0xa0, + 0x3b, 0x3c, 0x0f, 0x2c, 0xce, 0x78, 0x12, 0x0b, 0xc6, 0x30, 0x18, 0xa5, 0x05, 0x2b, 0xa6, 0x3c, + 0xa3, 0x06, 0x55, 0x8f, 0xe2, 0x98, 0xd9, 0x24, 0x13, 0x57, 0xcc, 0xfc, 0xaa, 0x7f, 0x2f, 0xc1, + 0xaa, 0xe5, 0x78, 0xa1, 0x4b, 0x26, 0x7d, 0x4e, 0x28, 0xe6, 0xf8, 0x16, 0x56, 0x23, 0x8a, 0xc3, + 0xc0, 0x8f, 0x69, 0x70, 0xb3, 0xce, 0xea, 0x39, 0x5f, 0xdc, 0xf0, 0x71, 0x21, 0x3f, 0x76, 0x2e, + 0xd3, 0x17, 0x2b, 0x53, 0x92, 0xe5, 0x5c, 0x12, 0x6e, 0x43, 0x35, 0x4c, 0x2b, 0x68, 0x4b, 0x2d, + 0xc5, 0xa8, 0x75, 0xd6, 0x17, 0x96, 0x37, 0x73, 0x96, 0xa8, 0x7a, 
0xe6, 0xb8, 0xee, 0x20, 0x89, + 0x29, 0xf2, 0x99, 0x47, 0x5a, 0xb9, 0xa5, 0x18, 0xcb, 0x66, 0x5d, 0x80, 0xfd, 0x0c, 0x43, 0x03, + 0x1a, 0x92, 0x14, 0xb0, 0x84, 0x9f, 0x0f, 0xe2, 0x61, 0x10, 0x92, 0x56, 0x91, 0x3c, 0x55, 0xe0, + 0x3d, 0x01, 0x5b, 0x02, 0xc5, 0x1d, 0xb8, 0x37, 0x6d, 0x52, 0xce, 0x4d, 0xab, 0xca, 0x3e, 0xb4, + 0xd9, 0x3e, 0xa6, 0x73, 0x35, 0xd5, 0x89, 0x00, 0x79, 0xd7, 0xbf, 0x82, 0x9a, 0x0f, 0x2e, 0xc5, + 0x8b, 0xa2, 0x94, 0x1b, 0x89, 0x6a, 0xc2, 0xf2, 0x44, 0x4f, 0xba, 0x97, 0xc9, 0x1d, 0x1f, 0x41, + 0xad, 0x28, 0x63, 0x49, 0x86, 0x21, 0x98, 0x48, 0xd0, 0x0f, 0x61, 0xd3, 0xe2, 0x11, 0x31, 0xcf, + 0xf1, 0xed, 0x03, 0x3f, 0x4c, 0xf8, 0x2e, 0x73, 0xdd, 0x7c, 0x89, 0xb7, 0x6d, 0x45, 0x3f, 0x85, + 0xe6, 0xa2, 0x6a, 0x99, 0xb2, 0x57, 0xb0, 0xc1, 0x6c, 0x3b, 0x22, 0x9b, 0x71, 0x1a, 0x0d, 0xb2, + 0x9c, 0x74, 0xbb, 0xa9, 0xcd, 0xd6, 0xa7, 0xe1, 0xac, 0xb4, 0x58, 0xb3, 0x7e, 0x00, 0x98, 0xd7, + 0x38, 0x61, 0x11, 0xf3, 0x88, 0x53, 0x24, 0x1d, 0x5a, 0x48, 0x95, 0x67, 0x21, 0xd7, 0xf1, 0x39, + 0x45, 0x17, 0x4c, 0xec, 0x38, 0xf3, 0x0c, 0xe4, 0x50, 0x3f, 0xd6, 0xbf, 0x95, 0x0a, 0x1d, 0xf6, + 0x12, 0x3e, 0x27, 0xf8, 0x4f, 0x5d, 0xfb, 0x11, 0xd6, 0x26, 0xf9, 0xe1, 0xa4, 0x55, 0xad, 0xd4, + 0x5a, 0x32, 0x6a, 0x9d, 0xd6, 0x6c, 0x95, 0xab, 0x92, 0x4c, 0x8c, 0xae, 0xca, 0xbc, 0xb5, 0xc7, + 0xff, 0x82, 0x29, 0x8f, 0x61, 0x6b, 0xe1, 0x90, 0x7e, 0xd3, 0xa1, 0xcf, 0xde, 0x41, 0xad, 0x30, + 0x33, 0x6c, 0x40, 0x7d, 0xb7, 0x77, 0x74, 0x62, 0x76, 0x2d, 0x6b, 0xe7, 0xfd, 0x61, 0xb7, 0x71, + 0x07, 0x11, 0xd4, 0xfe, 0xf1, 0x0c, 0xa6, 0x20, 0xc0, 0x5d, 0x73, 0xe7, 0x78, 0xaf, 0x77, 0xd4, + 0x28, 0x75, 0x7e, 0x94, 0xa1, 0x76, 0x4a, 0x31, 0xb7, 0x28, 0xba, 0x70, 0x86, 0x84, 0x2f, 0x61, + 0x45, 0x7e, 0x02, 0x45, 0x5b, 0xb8, 0x36, 0xa7, 0x4b, 0x04, 0x9a, 0x8b, 0x40, 0xdc, 0x87, 0x95, + 0xbe, 0xcf, 0xa2, 0x34, 0x6d, 0x6b, 0x96, 0x31, 0xf3, 0xf9, 0x6a, 0x3e, 0x5c, 0x1c, 0xcc, 0x06, + 0xe0, 0xc2, 0xda, 0x82, 0xf9, 0xa0, 0x31, 0x97, 0x74, 0xad, 0xcf, 0x9a, 0x4f, 0x6f, 0xc0, 0x4c, + 0xdf, 0x7a, 0xa1, 0xa0, 0x03, 0x78, 0xf5, 0x4f, 0x85, 0x4f, 0xae, 0x29, 0x31, 0xff, 0x27, 0x6e, + 0x1a, 0xbf, 0x26, 0xa6, 0x4f, 0x19, 0xe2, 0x29, 0x75, 0x3f, 0x71, 0xdd, 0xbd, 0x24, 0x74, 0xe9, + 0xcb, 0x3f, 0xd3, 0x64, 0x28, 0x52, 0x95, 0xfa, 0x81, 0xb9, 0x67, 0xff, 0xe1, 0xa9, 0x4e, 0x1f, + 0x1e, 0xf4, 0x7d, 0xb9, 0x41, 0x8f, 0x7c, 0x4e, 0xa3, 0xdc, 0x45, 0x6f, 0xe0, 0xfe, 0x0c, 0x7e, + 0x3b, 0x37, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x15, 0x62, 0x93, 0xba, 0xaf, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto index cc2bb74f3b73..20d4366b03fc 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. 
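// A note inferred from the hunks above (not part of the upstream change): the
// regenerated test.pb.go now records this file's package-relative path, which
// is why its Metadata string and proto.RegisterFile call changed from
// "test.proto" to "grpc_testing/test.proto". The regeneration mirrors the
// go:generate directive added to interop/test_utils.go later in this patch,
// roughly:
//
//   protoc --go_out=plugins=grpc:. grpc_testing/test.proto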
syntax = "proto2"; diff --git a/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go index 9c09ad7b74b3..5b42b76a9bea 100644 --- a/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go +++ b/vendor/google.golang.org/grpc/interop/http2/negative_http2_client.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * * Client used to test http2 error edge cases like GOAWAYs and RST_STREAMs diff --git a/vendor/google.golang.org/grpc/interop/server/server.go b/vendor/google.golang.org/grpc/interop/server/server.go index 36ebcb6418e6..b833c76f6894 100644 --- a/vendor/google.golang.org/grpc/interop/server/server.go +++ b/vendor/google.golang.org/grpc/interop/server/server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -43,12 +28,13 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/testdata" ) var ( useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") - certFile = flag.String("tls_cert_file", "testdata/server1.pem", "The TLS cert file") - keyFile = flag.String("tls_key_file", "testdata/server1.key", "The TLS key file") + certFile = flag.String("tls_cert_file", "", "The TLS cert file") + keyFile = flag.String("tls_key_file", "", "The TLS key file") port = flag.Int("port", 10000, "The server port") ) @@ -61,6 +47,12 @@ func main() { } var opts []grpc.ServerOption if *useTLS { + if *certFile == "" { + *certFile = testdata.Path("server1.pem") + } + if *keyFile == "" { + *keyFile = testdata.Path("server1.key") + } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("Failed to generate credentials %v", err) diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem b/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/interop/server/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV 
-HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/server1.key b/vendor/google.golang.org/grpc/interop/server/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/interop/server/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem b/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/interop/server/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go index 15ec00839565..36285d36259f 100644 --- a/vendor/google.golang.org/grpc/interop/test_utils.go +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -1,36 +1,23 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + package interop import ( @@ -391,7 +378,7 @@ func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScop FillOauthScope: proto.Bool(true), } token := GetToken(serviceAccountKeyFile, oauthScope) - kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} + kv := map[string]string{"authorization": token.Type() + " " + token.AccessToken} ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) reply, err := tc.UnaryCall(ctx, req) if err != nil { diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index d492589c96b6..f8adc7e6d4fa 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -39,8 +24,8 @@ import ( ) // ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a connection broken -// and to cause activity so intermediaries are aware the connection is still in use. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. // Make sure these parameters are set in coordination with the keepalive policy on the server, // as incompatible settings can result in closing of connection. type ClientParameters struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 5395ca8200d6..ccfea5d4530e 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -1,38 +1,23 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
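// A minimal client-side sketch for the keepalive.ClientParameters comment
// reworded above (illustrative only; the durations are placeholder values and
// the field names follow grpc-go's keepalive package):
//
//	kp := keepalive.ClientParameters{
//		Time:                30 * time.Second, // ping after 30s with no activity
//		Timeout:             10 * time.Second, // wait 10s for the ping ack
//		PermitWithoutStream: true,             // also ping when no RPCs are active
//	}
//	conn, err := grpc.Dial(target, grpc.WithInsecure(), grpc.WithKeepaliveParams(kp))
//	if err != nil { /* handle dial error */ }
//	defer conn.Close()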
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ // Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata. +// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. package metadata // import "google.golang.org/grpc/metadata" import ( @@ -51,8 +36,17 @@ func DecodeKeyValue(k, v string) (string, string, error) { // two convenience functions New and Pairs to generate MD. type MD map[string][]string -// New creates a MD from given key-value map. -// Keys are automatically converted to lowercase. +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. func New(m map[string]string) MD { md := MD{} for k, val := range m { @@ -64,7 +58,16 @@ func New(m map[string]string) MD { // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. -// Keys are automatically converted to lowercase. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) @@ -91,9 +94,9 @@ func (md MD) Copy() MD { return Join(md) } -// Join joins any number of MDs into a single MD. +// Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which -// the MDs containing those values are presented to Join. +// the mds containing those values are presented to Join. 
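// A small usage sketch for the key rules documented above (illustrative only,
// not part of the upstream change; "x-request-id" is a made-up key):
//
//	md := metadata.Pairs("X-Request-Id", "42")                    // stored as "x-request-id"
//	md2 := metadata.New(map[string]string{"X-Request-Id": "43"})  // also lowercased
//	merged := metadata.Join(md, md2)                              // "x-request-id": ["42", "43"]
//	ctx := metadata.NewOutgoingContext(context.Background(), merged)
//
// On the receiving side, FromIncomingContext returns an MD that should be
// treated as read-only; call Copy() before modifying it.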
func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -107,11 +110,6 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated. -func NewContext(ctx context.Context, md MD) context.Context { - return NewOutgoingContext(ctx, md) -} - // NewIncomingContext creates a new context with incoming md attached. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) @@ -122,22 +120,17 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, md) } -// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. -func FromContext(ctx context.Context) (md MD, ok bool) { - return FromIncomingContext(ctx) -} - -// FromIncomingContext returns the incoming MD in ctx if it exists. The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. func FromIncomingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdIncomingKey{}).(MD) return } -// FromOutgoingContext returns the outgoing MD in ctx if it exists. The -// returned md should be immutable, writing to it may cause races. -// Modification should be made to the copies of the returned md. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdOutgoingKey{}).(MD) return diff --git a/vendor/google.golang.org/grpc/metadata/metadata_test.go b/vendor/google.golang.org/grpc/metadata/metadata_test.go index bd982ccd888f..210cbb26767b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata_test.go +++ b/vendor/google.golang.org/grpc/metadata/metadata_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 000000000000..7e69a2ca0a66 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "errors" + "fmt" + "net" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. 
+// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. +func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the adrress resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unncessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exisits until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. 
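// A consumer-side sketch of the Resolver/Watcher API introduced in this file
// (illustrative only; error handling trimmed and "example.com:443" is a
// placeholder target):
//
//	r, _ := naming.NewDNSResolver()
//	w, _ := r.Resolve("example.com:443")
//	defer w.Close()
//	for {
//		updates, err := w.Next() // blocks until the resolved address set changes
//		if err != nil {
//			break // the watcher was closed
//		}
//		for _, u := range updates {
//			// u.Op is Add or Delete, u.Addr is "host:port"; balancer addresses
//			// discovered via SRV carry AddrMetadataGRPCLB in u.Metadata.
//		}
//	}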
+type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver_test.go b/vendor/google.golang.org/grpc/naming/dns_resolver_test.go new file mode 100644 index 000000000000..be1ac1aeca71 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver_test.go @@ -0,0 +1,315 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "fmt" + "net" + "reflect" + "sync" + "testing" + "time" +) + +func newUpdateWithMD(op Operation, addr, lb string) *Update { + return &Update{ + Op: op, + Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: lb}, + } +} + +func toMap(u []*Update) map[string]*Update { + m := make(map[string]*Update) + for _, v := range u { + m[v.Addr] = v + } + return m +} + +func TestCompileUpdate(t *testing.T) { + tests := []struct { + oldAddrs []string + newAddrs []string + want []*Update + }{ + { + []string{}, + []string{"1.0.0.1"}, + []*Update{{Op: Add, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.1"}, + []*Update{}, + }, + { + []string{"1.0.0.0"}, + []string{"1.0.0.1"}, + []*Update{{Op: Delete, Addr: "1.0.0.0"}, {Op: Add, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.0"}, + []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}}, + }, + { + []string{"1.0.0.1"}, + []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, + []*Update{{Op: Add, Addr: "1.0.0.2"}, {Op: Add, Addr: "1.0.0.3"}}, + }, + { + []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, + []string{"1.0.0.0"}, + []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}, {Op: Delete, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.3"}}, + }, + { + []string{"1.0.0.1", "1.0.0.3", "1.0.0.5"}, + []string{"1.0.0.2", "1.0.0.3", "1.0.0.6"}, + []*Update{{Op: Delete, Addr: "1.0.0.1"}, {Op: Add, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.5"}, {Op: Add, Addr: "1.0.0.6"}}, + }, + { + []string{"1.0.0.1", "1.0.0.1", "1.0.0.2"}, + []string{"1.0.0.1"}, + []*Update{{Op: Delete, Addr: "1.0.0.2"}}, + }, + } + + var w dnsWatcher + for _, c := range tests { + w.curAddrs = make(map[string]*Update) + newUpdates := make(map[string]*Update) + for _, a := range c.oldAddrs { + w.curAddrs[a] = &Update{Addr: a} + } + for _, a := range c.newAddrs { + newUpdates[a] = &Update{Addr: a} + } + r := w.compileUpdate(newUpdates) + if !reflect.DeepEqual(toMap(c.want), toMap(r)) { + t.Errorf("w(%+v).compileUpdate(%+v) = %+v, want %+v", c.oldAddrs, c.newAddrs, updatesToSlice(r), updatesToSlice(c.want)) + } + } +} + +func TestResolveFunc(t *testing.T) { + tests := []struct { + addr string + want error + }{ + // TODO(yuxuanli): More false cases? 
+ {"www.google.com", nil}, + {"foo.bar:12345", nil}, + {"127.0.0.1", nil}, + {"127.0.0.1:12345", nil}, + {"[::1]:80", nil}, + {"[2001:db8:a0b:12f0::1]:21", nil}, + {":80", nil}, + {"127.0.0...1:12345", nil}, + {"[fe80::1%lo0]:80", nil}, + {"golang.org:http", nil}, + {"[2001:db8::1]:http", nil}, + {":", nil}, + {"", errMissingAddr}, + {"[2001:db8:a0b:12f0::1", fmt.Errorf("invalid target address %v", "[2001:db8:a0b:12f0::1")}, + } + + r, err := NewDNSResolver() + if err != nil { + t.Errorf("%v", err) + } + for _, v := range tests { + _, err := r.Resolve(v.addr) + if !reflect.DeepEqual(err, v.want) { + t.Errorf("Resolve(%q) = %v, want %v", v.addr, err, v.want) + } + } +} + +var hostLookupTbl = map[string][]string{ + "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, + "ipv4.single.fake": {"1.2.3.4"}, + "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, + "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, + "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, +} + +func hostLookup(host string) ([]string, error) { + if addrs, ok := hostLookupTbl[host]; ok { + return addrs, nil + } + return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host) +} + +var srvLookupTbl = map[string][]*net.SRV{ + "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, +} + +func srvLookup(service, proto, name string) (string, []*net.SRV, error) { + cname := "_" + service + "._" + proto + "." + name + if srvs, ok := srvLookupTbl[cname]; ok { + return cname, srvs, nil + } + return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname) +} + +func updatesToSlice(updates []*Update) []Update { + res := make([]Update, len(updates)) + for i, u := range updates { + res[i] = *u + } + return res +} + +func testResolver(t *testing.T, freq time.Duration, slp time.Duration) { + tests := []struct { + target string + want []*Update + }{ + { + "foo.bar.com", + []*Update{{Op: Add, Addr: "1.2.3.4" + colonDefaultPort}, {Op: Add, Addr: "5.6.7.8" + colonDefaultPort}}, + }, + { + "foo.bar.com:1234", + []*Update{{Op: Add, Addr: "1.2.3.4:1234"}, {Op: Add, Addr: "5.6.7.8:1234"}}, + }, + { + "srv.ipv4.single.fake", + []*Update{newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.single.fake")}, + }, + { + "srv.ipv4.multi.fake", + []*Update{ + newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.multi.fake"), + newUpdateWithMD(Add, "5.6.7.8:1234", "ipv4.multi.fake"), + newUpdateWithMD(Add, "9.10.11.12:1234", "ipv4.multi.fake")}, + }, + { + "srv.ipv6.single.fake", + []*Update{newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.single.fake")}, + }, + { + "srv.ipv6.multi.fake", + []*Update{ + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.multi.fake"), + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1002]:1234", "ipv6.multi.fake"), + newUpdateWithMD(Add, "[2607:f8b0:400a:801::1003]:1234", "ipv6.multi.fake"), + }, + }, + } + + for _, a := range tests { + r, err := NewDNSResolverWithFreq(freq) + if err != nil { + t.Fatalf("%v\n", err) + } + w, err := r.Resolve(a.target) + if err != nil { + t.Fatalf("%v\n", err) + } + updates, err := w.Next() + if err != nil { + t.Fatalf("%v\n", err) + } + if !reflect.DeepEqual(toMap(a.want), toMap(updates)) { + 
t.Errorf("Resolve(%q) = %+v, want %+v\n", a.target, updatesToSlice(updates), updatesToSlice(a.want)) + } + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + _, err := w.Next() + if err != nil { + return + } + t.Error("Execution shouldn't reach here, since w.Next() should be blocked until close happen.") + } + }() + // Sleep for sometime to let watcher do more than one lookup + time.Sleep(slp) + w.Close() + wg.Wait() + } +} + +func TestResolve(t *testing.T) { + defer replaceNetFunc()() + testResolver(t, time.Millisecond*5, time.Millisecond*10) +} + +const colonDefaultPort = ":" + defaultPort + +func TestIPWatcher(t *testing.T) { + tests := []struct { + target string + want []*Update + }{ + {"127.0.0.1", []*Update{{Op: Add, Addr: "127.0.0.1" + colonDefaultPort}}}, + {"127.0.0.1:12345", []*Update{{Op: Add, Addr: "127.0.0.1:12345"}}}, + {"::1", []*Update{{Op: Add, Addr: "[::1]" + colonDefaultPort}}}, + {"[::1]:12345", []*Update{{Op: Add, Addr: "[::1]:12345"}}}, + {"[::1]:", []*Update{{Op: Add, Addr: "[::1]:443"}}}, + {"2001:db8:85a3::8a2e:370:7334", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]:12345", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, + {"[2001:db8::1]:http", []*Update{{Op: Add, Addr: "[2001:db8::1]:http"}}}, + // TODO(yuxuanli): zone support? + } + + for _, v := range tests { + r, err := NewDNSResolverWithFreq(time.Millisecond * 5) + if err != nil { + t.Fatalf("%v\n", err) + } + w, err := r.Resolve(v.target) + if err != nil { + t.Fatalf("%v\n", err) + } + var updates []*Update + var wg sync.WaitGroup + wg.Add(1) + count := 0 + go func() { + defer wg.Done() + for { + u, err := w.Next() + if err != nil { + return + } + updates = u + count++ + } + }() + // Sleep for sometime to let watcher do more than one lookup + time.Sleep(time.Millisecond * 10) + w.Close() + wg.Wait() + if !reflect.DeepEqual(v.want, updates) { + t.Errorf("Resolve(%q) = %v, want %+v\n", v.target, updatesToSlice(updates), updatesToSlice(v.want)) + } + if count != 1 { + t.Errorf("IPWatcher Next() should return only once, not %d times\n", count) + } + } +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 000000000000..8bdf21e79989 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go17_test.go b/vendor/google.golang.org/grpc/naming/go17_test.go new file mode 100644 index 000000000000..d1de221a5ecc --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17_test.go @@ -0,0 +1,42 @@ +// +build go1.7, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + } +} diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 000000000000..b5a0f842748f --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/go18_test.go b/vendor/google.golang.org/grpc/naming/go18_test.go new file mode 100644 index 000000000000..5e297539ba57 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18_test.go @@ -0,0 +1,41 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "net" +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + } +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c2e0871e6f8b..1af7e32f86d0 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index bfa6205ba9ea..317b8b9d09a5 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,7 +27,8 @@ import ( "google.golang.org/grpc/credentials" ) -// Peer contains the information of the peer for an RPC. +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. type Peer struct { // Addr is the peer address. Addr net.Addr diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000000..9085dbc9c98b --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. 
It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper_test.go b/vendor/google.golang.org/grpc/picker_wrapper_test.go new file mode 100644 index 000000000000..23bc8f24333a --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper_test.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + _ "google.golang.org/grpc/grpclog/glogger" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/transport" +) + +const goroutineCount = 5 + +var ( + testT = &testTransport{} + testSC = &acBalancerWrapper{ac: &addrConn{ + state: connectivity.Ready, + transport: testT, + }} + testSCNotReady = &acBalancerWrapper{ac: &addrConn{ + state: connectivity.TransientFailure, + }} +) + +type testTransport struct { + transport.ClientTransport +} + +type testingPicker struct { + err error + sc balancer.SubConn + maxCalled int64 +} + +func (p *testingPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if atomic.AddInt64(&p.maxCalled, -1) < 0 { + return nil, nil, fmt.Errorf("Pick called to many times (> goroutineCount)") + } + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func TestBlockingPickTimeout(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + if _, _, err := bp.pick(ctx, true, balancer.PickOptions{}); err != context.DeadlineExceeded { + t.Errorf("bp.pick returned error %v, want DeadlineExceeded", err) + } +} + +func TestBlockingPick(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + // All goroutines should block because picker is nil in bp. + var finishedCount uint64 + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(50 * time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickNoSubAvailable(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + var finishedCount uint64 + bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount}) + // All goroutines should block because picker returns no sc avilable. + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(50 * time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickTransientWaitforready(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + bp.updatePicker(&testingPicker{err: balancer.ErrTransientFailure, maxCalled: goroutineCount}) + var finishedCount uint64 + // All goroutines should block because picker returns transientFailure and + // picks are not failfast. 
+ for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), false, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} + +func TestBlockingPickSCNotReady(t *testing.T) { + defer leakcheck.Check(t) + bp := newPickerWrapper() + bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount}) + var finishedCount uint64 + // All goroutines should block because sc is not ready. + for i := goroutineCount; i > 0; i-- { + go func() { + if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT { + t.Errorf("bp.pick returned non-nil error: %v", err) + } + atomic.AddUint64(&finishedCount, 1) + }() + } + time.Sleep(time.Millisecond) + if c := atomic.LoadUint64(&finishedCount); c != 0 { + t.Errorf("finished goroutines count: %v, want 0", c) + } + bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000000..7f993ef5a381 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + } else { + b.sc.UpdateAddresses(addrs) + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc || s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/pickfirst_test.go b/vendor/google.golang.org/grpc/pickfirst_test.go new file mode 100644 index 000000000000..e58b3422c149 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst_test.go @@ -0,0 +1,352 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "math" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/test/leakcheck" +) + +func TestOneBackendPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + // The second RPC should succeed. + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestBackendsPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestNewAddressWhileBlockingPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. + Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + }() + } + time.Sleep(50 * time.Millisecond) + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + wg.Wait() +} + +func TestCloseWithPendingRPCPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 1 + _, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + defer wg.Done() + // This RPC blocks until NewAddress is called. + Invoke(context.Background(), "/foo/bar", &req, &reply, cc) + }() + } + time.Sleep(50 * time.Millisecond) + cc.Close() + wg.Wait() +} + +func TestOneServerDownPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. 
+ for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + + servers[0].stop() + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) +} + +func TestAllServersDownPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 2 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}) + // The second RPC should succeed with the first server. + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + + for i := 0; i < numServers; i++ { + servers[i].stop() + } + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); Code(err) == codes.Unavailable { + return + } + time.Sleep(time.Millisecond) + } + t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err) +} + +func TestAddressesRemovedPickfirst(t *testing.T) { + defer leakcheck.Check(t) + r, rcleanup := manual.GenerateAndRegisterManualResolver() + defer rcleanup() + + numServers := 3 + servers, _, scleanup := startServers(t, numServers, math.MaxInt32) + defer scleanup() + + cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{})) + if err != nil { + t.Fatalf("failed to dial: %v", err) + } + defer cc.Close() + // The first RPC should fail because there's no address. + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + req := "port" + var reply string + if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) + } + + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[0]. 
+ r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Append server[0], nothing should change. + r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}}) + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[1]. + r.NewAddress([]resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[2].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) + } + time.Sleep(10 * time.Millisecond) + } + + // Remove server[2]. + r.NewAddress([]resolver.Address{{Addr: servers[0].addr}}) + for i := 0; i < 1000; i++ { + if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port { + break + } + time.Sleep(time.Millisecond) + } + for i := 0; i < 20; i++ { + if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port { + t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) + } + time.Sleep(10 * time.Millisecond) + } +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go index 10188dc34336..3e17efec61bd 100644 --- a/vendor/google.golang.org/grpc/proxy.go +++ b/vendor/google.golang.org/grpc/proxy.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -97,7 +82,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ Header: map[string][]string{"User-Agent": {grpcUA}}, }) - if err := sendHTTPRequest(ctx, req, conn); err != nil { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } diff --git a/vendor/google.golang.org/grpc/proxy_test.go b/vendor/google.golang.org/grpc/proxy_test.go index 846b396b9119..39ee123cca9a 100644 --- a/vendor/google.golang.org/grpc/proxy_test.go +++ b/vendor/google.golang.org/grpc/proxy_test.go @@ -1,33 +1,20 @@ +// +build !race + /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -43,6 +30,7 @@ import ( "time" "golang.org/x/net/context" + "google.golang.org/grpc/test/leakcheck" ) const ( @@ -60,29 +48,6 @@ func overwrite(hpfe func(req *http.Request) (*url.URL, error)) func() { } } -func TestMapAddressEnv(t *testing.T) { - // Overwrite the function in the test and restore them in defer. - hpfe := func(req *http.Request) (*url.URL, error) { - if req.URL.Host == envTestAddr { - return &url.URL{ - Scheme: "https", - Host: envProxyAddr, - }, nil - } - return nil, nil - } - defer overwrite(hpfe)() - - // envTestAddr should be handled by ProxyFromEnvironment. - got, err := mapAddress(context.Background(), envTestAddr) - if err != nil { - t.Error(err) - } - if got != envProxyAddr { - t.Errorf("want %v, got %v", envProxyAddr, got) - } -} - type proxyServer struct { t *testing.T lis net.Listener @@ -133,7 +98,8 @@ func (p *proxyServer) stop() { } func TestHTTPConnect(t *testing.T) { - plis, err := net.Listen("tcp", ":0") + defer leakcheck.Check(t) + plis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } @@ -141,7 +107,7 @@ func TestHTTPConnect(t *testing.T) { go p.run() defer p.stop() - blis, err := net.Listen("tcp", ":0") + blis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } @@ -190,3 +156,27 @@ func TestHTTPConnect(t *testing.T) { t.Fatalf("received msg: %v, want %v", recvBuf, msg) } } + +func TestMapAddressEnv(t *testing.T) { + defer leakcheck.Check(t) + // Overwrite the function in the test and restore them in defer. + hpfe := func(req *http.Request) (*url.URL, error) { + if req.URL.Host == envTestAddr { + return &url.URL{ + Scheme: "https", + Host: envProxyAddr, + }, nil + } + return nil, nil + } + defer overwrite(hpfe)() + + // envTestAddr should be handled by ProxyFromEnvironment. + got, err := mapAddress(context.Background(), envTestAddr) + if err != nil { + t.Error(err) + } + if got != envProxyAddr { + t.Errorf("want %v, got %v", envProxyAddr, got) + } +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 76987a420958..b40725327480 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: reflection.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_reflection_v1alpha/reflection.proto /* Package grpc_reflection_v1alpha is a generated protocol buffer package. It is generated from these files: - reflection.proto + grpc_reflection_v1alpha/reflection.proto It has these top-level messages: ServerReflectionRequest @@ -94,6 +93,13 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_ return nil } +func (m *ServerReflectionRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + func (m *ServerReflectionRequest) GetFileByFilename() string { if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename @@ -257,11 +263,25 @@ func (m *ExtensionRequest) String() string { return proto.CompactText func (*ExtensionRequest) ProtoMessage() {} func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *ExtensionRequest) GetContainingType() string { + if m != nil { + return m.ContainingType + } + return "" +} + +func (m *ExtensionRequest) GetExtensionNumber() int32 { + if m != nil { + return m.ExtensionNumber + } + return 0 +} + // The message sent by the server to answer ServerReflectionInfo method. type ServerReflectionResponse struct { ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` - // The server set one of the following fields accroding to the message_request + // The server set one of the following fields according to the message_request // in the request. // // Types that are valid to be assigned to MessageResponse: @@ -307,6 +327,13 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon return nil } +func (m *ServerReflectionResponse) GetValidHost() string { + if m != nil { + return m.ValidHost + } + return "" +} + func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if m != nil { return m.OriginalRequest @@ -469,13 +496,20 @@ func (m *FileDescriptorResponse) String() string { return proto.Compa func (*FileDescriptorResponse) ProtoMessage() {} func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if m != nil { + return m.FileDescriptorProto + } + return nil +} + // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . 
BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName" json:"base_type_name,omitempty"` - ExtensionNumber []int32 `protobuf:"varint,2,rep,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` } func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } @@ -483,6 +517,20 @@ func (m *ExtensionNumberResponse) String() string { return proto.Comp func (*ExtensionNumberResponse) ProtoMessage() {} func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *ExtensionNumberResponse) GetBaseTypeName() string { + if m != nil { + return m.BaseTypeName + } + return "" +} + +func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if m != nil { + return m.ExtensionNumber + } + return nil +} + // A list of ServiceResponse sent by the server answering list_services request. type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use @@ -515,6 +563,13 @@ func (m *ServiceResponse) String() string { return proto.CompactTextS func (*ServiceResponse) ProtoMessage() {} func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (m *ServiceResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // The error code and error message sent by the server when an error occurs. type ErrorResponse struct { // This field uses the error codes defined in grpc::StatusCode. @@ -527,6 +582,20 @@ func (m *ErrorResponse) String() string { return proto.CompactTextStr func (*ErrorResponse) ProtoMessage() {} func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *ErrorResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + +func (m *ErrorResponse) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + func init() { proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") @@ -643,52 +712,52 @@ var _ServerReflection_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "reflection.proto", + Metadata: "grpc_reflection_v1alpha/reflection.proto", } -func init() { proto.RegisterFile("reflection.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 646 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, - 0x10, 0xfd, 0xd2, 0xe6, 0x47, 0x99, 0xfc, 0xf9, 0xdb, 0x86, 0xc4, 0x05, 0x15, 0x21, 0x43, 0x21, - 0x45, 0x28, 0xb4, 0x46, 0xe2, 0x01, 0x52, 0x40, 0x45, 0x2a, 0x2d, 0x72, 0xb8, 0x41, 0x5c, 0x58, - 0x8e, 0xb3, 0x4e, 0x0d, 0x8e, 0xd7, 0xec, 0xba, 0x81, 0x5c, 0xf1, 0x10, 0x3c, 0x14, 0xaf, 0xc4, - 0x25, 0xbb, 0xeb, 0x9f, 0x38, 0xae, 0x0d, 0xea, 0x95, 0xad, 0xb3, 0x33, 0x7b, 0x66, 0xe6, 0x9c, - 0x59, 0x50, 0x28, 0x76, 0x3c, 0x6c, 0x87, 0x2e, 0xf1, 0xc7, 0x01, 0x25, 0x21, 0x41, 0xc3, 0x05, - 0x0d, 0xec, 0x71, 0x06, 0x5e, 0x9d, 0x58, 0x5e, 0x70, 0x65, 0x69, 0xbf, 0x77, 0x60, 0x38, 0xc5, - 0x74, 0x85, 0xa9, 0x91, 0x1e, 0x1a, 0xf8, 0xeb, 0x35, 0x66, 0x21, 0x42, 0x50, 0xbd, 0x22, 0x2c, - 0x54, 0x2b, 0x0f, 0x2a, 0xa3, 0xa6, 0x21, 0xff, 
0xd1, 0x53, 0x50, 0x1c, 0xd7, 0xc3, 0xe6, 0x6c, - 0x6d, 0x8a, 0xaf, 0x6f, 0x2d, 0xb1, 0xba, 0x2b, 0xce, 0xcf, 0xfe, 0x33, 0xba, 0x02, 0x99, 0xac, - 0xdf, 0xc4, 0x38, 0x7a, 0x09, 0x03, 0x19, 0x6b, 0x13, 0x3f, 0xb4, 0x5c, 0xdf, 0xf5, 0x17, 0x26, - 0x5b, 0x2f, 0x67, 0xc4, 0x53, 0xab, 0x71, 0x46, 0x5f, 0x9c, 0x9f, 0xa6, 0xc7, 0x53, 0x79, 0x8a, - 0x16, 0xb0, 0x9f, 0xcf, 0xc3, 0xdf, 0x43, 0xec, 0x33, 0x5e, 0x9b, 0x5a, 0xe3, 0xa9, 0x2d, 0xfd, - 0x68, 0x5c, 0xd2, 0xd0, 0xf8, 0x75, 0x12, 0x19, 0x77, 0xc1, 0x59, 0x86, 0xdb, 0x2c, 0x69, 0x04, - 0x9a, 0xc0, 0x81, 0xe5, 0x79, 0x9b, 0xcb, 0x4d, 0xff, 0x7a, 0x39, 0xc3, 0x94, 0x99, 0xc4, 0x31, - 0xc3, 0x75, 0x80, 0xd5, 0x7a, 0x5c, 0xe7, 0x3e, 0x0f, 0x4b, 0xd3, 0x2e, 0xa2, 0xa0, 0x4b, 0xe7, - 0x03, 0x0f, 0x41, 0x87, 0xd0, 0xf1, 0x5c, 0x16, 0x9a, 0x8c, 0x0f, 0xd1, 0xb5, 0x31, 0x53, 0x1b, - 0x71, 0x4e, 0x5b, 0xc0, 0xd3, 0x18, 0x9d, 0xfc, 0x0f, 0xbd, 0x25, 0x66, 0xcc, 0x5a, 0x60, 0x93, - 0x46, 0x85, 0x69, 0x0e, 0x28, 0xf9, 0x62, 0xd1, 0x13, 0xe8, 0x65, 0xba, 0x96, 0x35, 0x44, 0xd3, - 0xef, 0x6e, 0x60, 0x49, 0x7b, 0x04, 0x4a, 0xbe, 0x6c, 0x75, 0x87, 0x47, 0xd6, 0x8c, 0x1e, 0xde, - 0x2e, 0x54, 0xfb, 0x55, 0x05, 0xf5, 0xa6, 0xc4, 0x2c, 0x20, 0x3e, 0xc3, 0xe8, 0x00, 0x60, 0x65, - 0x79, 0xee, 0xdc, 0xcc, 0x28, 0xdd, 0x94, 0xc8, 0x99, 0x90, 0xfb, 0x13, 0x28, 0x84, 0xba, 0x0b, - 0xd7, 0xb7, 0xbc, 0xa4, 0x6e, 0x49, 0xd3, 0xd2, 0x8f, 0x4b, 0x15, 0x28, 0xb1, 0x93, 0xd1, 0x4b, - 0x6e, 0x4a, 0x9a, 0xfd, 0x02, 0xaa, 0xd4, 0x79, 0x8e, 0x99, 0x4d, 0xdd, 0x20, 0x24, 0x94, 0x73, - 0x44, 0x75, 0x49, 0x87, 0xb4, 0xf4, 0xe7, 0xa5, 0x24, 0xc2, 0x64, 0xaf, 0xd2, 0xbc, 0xa4, 0x1d, - 0x3e, 0x76, 0x69, 0xb9, 0x9b, 0x27, 0xe8, 0x1b, 0xdc, 0x2f, 0xd6, 0x3a, 0xa5, 0xac, 0xfd, 0xa3, - 0xaf, 0x9c, 0x01, 0x32, 0x9c, 0xf7, 0x0a, 0xec, 0x91, 0x12, 0xcf, 0x61, 0xb0, 0x65, 0x90, 0x0d, - 0x61, 0x5d, 0x12, 0x3e, 0x2b, 0x25, 0x3c, 0xdf, 0x18, 0x28, 0x43, 0xd6, 0xcf, 0xfa, 0x2a, 0x65, - 0xb9, 0x84, 0x2e, 0xa6, 0x34, 0x3b, 0xc1, 0x86, 0xbc, 0xfd, 0x71, 0x79, 0x3b, 0x22, 0x3c, 0x73, - 0x6f, 0x07, 0x67, 0x81, 0x09, 0x02, 0x65, 0x63, 0xd8, 0x08, 0xd3, 0xce, 0x61, 0x50, 0x3c, 0x77, - 0xa4, 0xc3, 0x9d, 0xbc, 0x94, 0xf2, 0xe1, 0xe1, 0x8e, 0xda, 0x1d, 0xb5, 0x8d, 0xbd, 0x6d, 0x51, - 0xde, 0x8b, 0x23, 0xed, 0x33, 0x0c, 0x4b, 0x46, 0x8a, 0x1e, 0x41, 0x77, 0x66, 0x31, 0x2c, 0x17, - 0xc0, 0x94, 0x6f, 0x4c, 0xe4, 0xcc, 0xb6, 0x40, 0x85, 0xff, 0x2f, 0xc4, 0xfb, 0x52, 0xbc, 0x03, - 0xbb, 0x45, 0x3b, 0xf0, 0x11, 0xf6, 0x0a, 0xa6, 0xc9, 0x1f, 0x80, 0x46, 0x2c, 0x8b, 0x2c, 0xb4, - 0xa5, 0x8f, 0xfe, 0xea, 0xea, 0x4c, 0xaa, 0x91, 0x24, 0x6a, 0x87, 0xd0, 0xcb, 0x5f, 0xcb, 0x1f, - 0xce, 0x4c, 0xd1, 0xf2, 0x5f, 0x9b, 0x42, 0x67, 0x6b, 0xe2, 0x62, 0xf3, 0x22, 0xc5, 0x6c, 0x32, - 0x8f, 0x42, 0x6b, 0x46, 0x53, 0x22, 0xa7, 0x1c, 0x40, 0x0f, 0x21, 0x12, 0xc4, 0x8c, 0x55, 0x90, - 0x6b, 0xc7, 0x27, 0x20, 0xc1, 0x77, 0x11, 0xa6, 0xff, 0xac, 0x80, 0x92, 0x5f, 0x37, 0xf4, 0x03, - 0xfa, 0x79, 0xec, 0xad, 0xef, 0x10, 0x74, 0xeb, 0x8d, 0xbd, 0x7b, 0x72, 0x8b, 0x8c, 0xa8, 0xab, - 0x51, 0xe5, 0xb8, 0x32, 0xab, 0x4b, 0xe9, 0x5f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x3f, - 0x7b, 0x08, 0x87, 0x06, 0x00, 0x00, + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, + 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, + 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, + 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 
0x4b, 0x3e, + 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, + 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, + 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, + 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, + 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, + 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, + 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, + 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, + 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, + 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde, + 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd, + 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, + 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, + 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, + 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, + 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, + 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, + 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, + 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, + 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, + 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, + 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, + 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, + 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, + 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, + 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, + 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0, + 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, + 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, + 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, + 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, + 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, + 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, + 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, + 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, + 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, + 0xf4, 0x2f, 
0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto index 276ff0e255d9..c52ccc6ab2b4 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -1,31 +1,16 @@ -// Copyright 2016, Google Inc. -// All rights reserved. +// Copyright 2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Service exported by server reflection @@ -87,7 +72,7 @@ message ExtensionRequest { message ServerReflectionResponse { string valid_host = 1; ServerReflectionRequest original_request = 2; - // The server set one of the following fields accroding to the message_request + // The server set one of the following fields according to the message_request // in the request. oneof message_response { // This message is used to answer file_by_filename, file_containing_symbol, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go index 0b503d665fcb..5b0161885945 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. 
DO NOT EDIT. // source: proto2.proto -// DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. @@ -69,7 +68,7 @@ func init() { proto.RegisterFile("proto2.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 86 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, 0x99, 0x79, 0xe9, 0x4a, 0x6a, 0x5c, 0x3c, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, 0x29, 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto index 6b120f320c89..a675d143d713 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto2"; package grpc.testing; diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go index dbd09425feda..9ae4f07af1aa 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto2_ext.proto -// DO NOT EDIT! package grpc_testing @@ -68,7 +67,7 @@ func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ // 179 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x29, 0x2e, 0x90, 0x30, 0x84, 0xad, 0xa4, 0xca, 0xc5, 0xe9, 0x5a, 0x51, 0x92, 0x9a, 0x57, 0x9c, 0x99, 0x9f, 0x27, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto index b669141f4e25..a4942e481b43 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto2"; package grpc.testing; diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go index 0aaec7c9a6cd..26ec982aa46a 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto2_ext2.proto -// DO NOT EDIT! package grpc_testing @@ -58,7 +57,7 @@ func init() { proto.RegisterFile("proto2_ext2.proto", fileDescriptor2) } var fileDescriptor2 = []byte{ // 165 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0x31, 0xd2, 0x03, 0xb3, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0x0a, 0x20, 0x72, 0x4a, 0x36, 0x5c, 0x02, 0x8e, 0x79, 0xf9, 0x25, 0x19, 0xa9, 0x45, 0xae, 0x15, 0x25, 0xa9, 0x79, 0xc5, 0x99, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto index 16fa69e62e5a..d91ba0061914 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto2"; package grpc.testing; diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go index 27d71fc89e4e..62f71ff15842 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: test.proto -// DO NOT EDIT! 
package grpc_testing @@ -230,7 +229,7 @@ func init() { proto.RegisterFile("test.proto", fileDescriptor3) } var fileDescriptor3 = []byte{ // 231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, 0x10, 0x85, 0x59, 0x83, 0xd1, 0x3b, 0xfe, 0x32, 0x58, 0x84, 0x68, 0x11, 0xae, 0x08, 0xa9, 0x16, 0xb9, 0xd6, 0x56, 0xb6, 0x16, 0xb2, 0x79, 0x82, 0x6b, 0x18, 0xe2, 0x42, 0x4c, 0x36, 0x33, 0x13, 0xc1, 0x87, 0xb1, 0xf5, 0x39, 0x25, 0x59, 0x23, 0x0a, 0x62, 0x63, 0xb7, 0xe7, 0xe3, 0xcc, 0xb7, diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto index d74d0fc13369..cae3f01a0438 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; package grpc.testing; diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go new file mode 100644 index 000000000000..78876aae78a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. +// source: testv3.proto +// DO NOT EDIT! + +/* +Package grpc_testingv3 is a generated protocol buffer package. + +It is generated from these files: + testv3.proto + +It has these top-level messages: + SearchResponseV3 + SearchRequestV3 +*/ +package grpc_testingv3 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SearchResponseV3 struct { + Results []*SearchResponseV3_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *SearchResponseV3) Reset() { *m = SearchResponseV3{} } +func (m *SearchResponseV3) String() string { return proto.CompactTextString(m) } +func (*SearchResponseV3) ProtoMessage() {} +func (*SearchResponseV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *SearchResponseV3) GetResults() []*SearchResponseV3_Result { + if m != nil { + return m.Results + } + return nil +} + +type SearchResponseV3_Result struct { + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` +} + +func (m *SearchResponseV3_Result) Reset() { *m = SearchResponseV3_Result{} } +func (m *SearchResponseV3_Result) String() string { return proto.CompactTextString(m) } +func (*SearchResponseV3_Result) ProtoMessage() {} +func (*SearchResponseV3_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type SearchRequestV3 struct { + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *SearchRequestV3) Reset() { *m = SearchRequestV3{} } +func (m *SearchRequestV3) String() string { return proto.CompactTextString(m) } +func (*SearchRequestV3) ProtoMessage() {} +func (*SearchRequestV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*SearchResponseV3)(nil), "grpc.testingv3.SearchResponseV3") + proto.RegisterType((*SearchResponseV3_Result)(nil), "grpc.testingv3.SearchResponseV3.Result") + proto.RegisterType((*SearchRequestV3)(nil), "grpc.testingv3.SearchRequestV3") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for SearchServiceV3 service + +type SearchServiceV3Client interface { + Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) + StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) +} + +type searchServiceV3Client struct { + cc *grpc.ClientConn +} + +func NewSearchServiceV3Client(cc *grpc.ClientConn) SearchServiceV3Client { + return &searchServiceV3Client{cc} +} + +func (c *searchServiceV3Client) Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) { + out := new(SearchResponseV3) + err := grpc.Invoke(ctx, "/grpc.testingv3.SearchServiceV3/Search", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchServiceV3Client) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SearchServiceV3_serviceDesc.Streams[0], c.cc, "/grpc.testingv3.SearchServiceV3/StreamingSearch", opts...) 
+ if err != nil { + return nil, err + } + x := &searchServiceV3StreamingSearchClient{stream} + return x, nil +} + +type SearchServiceV3_StreamingSearchClient interface { + Send(*SearchRequestV3) error + Recv() (*SearchResponseV3, error) + grpc.ClientStream +} + +type searchServiceV3StreamingSearchClient struct { + grpc.ClientStream +} + +func (x *searchServiceV3StreamingSearchClient) Send(m *SearchRequestV3) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchServiceV3StreamingSearchClient) Recv() (*SearchResponseV3, error) { + m := new(SearchResponseV3) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for SearchServiceV3 service + +type SearchServiceV3Server interface { + Search(context.Context, *SearchRequestV3) (*SearchResponseV3, error) + StreamingSearch(SearchServiceV3_StreamingSearchServer) error +} + +func RegisterSearchServiceV3Server(s *grpc.Server, srv SearchServiceV3Server) { + s.RegisterService(&_SearchServiceV3_serviceDesc, srv) +} + +func _SearchServiceV3_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequestV3) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServiceV3Server).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testingv3.SearchServiceV3/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServiceV3Server).Search(ctx, req.(*SearchRequestV3)) + } + return interceptor(ctx, in, info, handler) +} + +func _SearchServiceV3_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServiceV3Server).StreamingSearch(&searchServiceV3StreamingSearchServer{stream}) +} + +type SearchServiceV3_StreamingSearchServer interface { + Send(*SearchResponseV3) error + Recv() (*SearchRequestV3, error) + grpc.ServerStream +} + +type searchServiceV3StreamingSearchServer struct { + grpc.ServerStream +} + +func (x *searchServiceV3StreamingSearchServer) Send(m *SearchResponseV3) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchServiceV3StreamingSearchServer) Recv() (*SearchRequestV3, error) { + m := new(SearchRequestV3) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SearchServiceV3_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testingv3.SearchServiceV3", + HandlerType: (*SearchServiceV3Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _SearchServiceV3_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingSearch", + Handler: _SearchServiceV3_StreamingSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("testv3.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 240 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x91, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x59, 0x83, 0xd1, 0x8e, 0x62, 0xcb, 0xe2, 0x21, 0xe4, 0x62, 0xe8, 0xa5, 0x39, 0x2d, + 0xd2, 0xfd, 0x05, 0x9e, 0xf5, 0xb4, 0x81, 0xe2, 0xb5, 0x86, 0x21, 0x2e, 0xc4, 0x64, 0x3b, 0x33, + 0x09, 0xf8, 0x7b, 0xfc, 0x13, 0xfe, 0x3c, 0x49, 0xd2, 0x08, 0x0a, 0xe2, 0xa5, 0xb7, 0x7d, 0x8f, + 0xf7, 0xbe, 0xe5, 0x31, 0x70, 0x2d, 0xc8, 0xd2, 0x5b, 0x13, 0xa8, 0x95, 0x56, 0xdf, 0x54, 0x14, + 
0x4a, 0x33, 0x58, 0xbe, 0xa9, 0x7a, 0xbb, 0xfe, 0x50, 0xb0, 0x2a, 0x70, 0x4f, 0xe5, 0xab, 0x43, + 0x0e, 0x6d, 0xc3, 0xb8, 0xb3, 0xfa, 0x01, 0x2e, 0x08, 0xb9, 0xab, 0x85, 0x13, 0x95, 0x45, 0xf9, + 0xd5, 0x76, 0x63, 0x7e, 0xd6, 0xcc, 0xef, 0x8a, 0x71, 0x63, 0xde, 0xcd, 0xbd, 0xf4, 0x09, 0xe2, + 0xc9, 0xd2, 0x2b, 0x88, 0x3a, 0xaa, 0x13, 0x95, 0xa9, 0x7c, 0xe1, 0x86, 0xa7, 0xbe, 0x85, 0x73, + 0xf1, 0x52, 0x63, 0x72, 0x36, 0x7a, 0x93, 0xd0, 0x29, 0x5c, 0x72, 0xe3, 0x43, 0x40, 0xe1, 0x24, + 0xca, 0xa2, 0x7c, 0xe1, 0xbe, 0xf5, 0x7a, 0x03, 0xcb, 0xf9, 0xc7, 0x43, 0x87, 0x2c, 0x3b, 0x3b, + 0x40, 0x0e, 0x1d, 0xd2, 0xfb, 0x11, 0x3c, 0x89, 0xed, 0xa7, 0x9a, 0x93, 0x05, 0x52, 0xef, 0xcb, + 0x61, 0xcd, 0x23, 0xc4, 0x93, 0xa5, 0xef, 0xfe, 0x9a, 0x71, 0x84, 0xa6, 0xd9, 0x7f, 0x3b, 0xf5, + 0x33, 0x2c, 0x0b, 0x21, 0xdc, 0xbf, 0xf9, 0xa6, 0x3a, 0x19, 0x35, 0x57, 0xf7, 0xea, 0x25, 0x1e, + 0x0f, 0x64, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0xe6, 0xa0, 0xf9, 0xb0, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto new file mode 100644 index 000000000000..1e175193ef63 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package grpc.testingv3; + +message SearchResponseV3 { + message Result { + string url = 1; + string title = 2; + repeated string snippets = 3; + } + repeated Result results = 1; +} + +message SearchRequestV3 { + string query = 1; +} + +service SearchServiceV3 { + rpc Search(SearchRequestV3) returns (SearchResponseV3); + rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index f28304d1be51..19bfc51e56fb 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -1,36 +1,23 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto + /* Package reflection implements server reflection service. @@ -227,6 +214,24 @@ func (s *serverReflectionServer) serviceMetadataForSymbol(name string) (interfac return nil, fmt.Errorf("unknown symbol: %v", name) } +// parseMetadata finds the file descriptor bytes specified meta. +// For SupportPackageIsVersion4, m is the name of the proto file, we +// call proto.FileDescriptor to get the byte slice. +// For SupportPackageIsVersion3, m is a byte slice itself. +func parseMetadata(meta interface{}) ([]byte, bool) { + // Check if meta is the file name. + if fileNameForMeta, ok := meta.(string); ok { + return proto.FileDescriptor(fileNameForMeta), true + } + + // Check if meta is the byte slice. + if enc, ok := meta.([]byte); ok { + return enc, true + } + + return nil, false +} + // fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, // does marshalling on it and returns the marshalled result. // The given symbol can be a type, a service or a method. @@ -249,12 +254,11 @@ func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ( } // Metadata not valid. - fileNameForMeta, ok := meta.(string) + enc, ok := parseMetadata(meta) if !ok { return nil, fmt.Errorf("invalid file descriptor for symbol: %v", name) } - enc := proto.FileDescriptor(fileNameForMeta) fd, err = s.decodeFileDesc(enc) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go index 13b05b627733..908589085aa8 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection_test.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection_test.go @@ -1,36 +1,26 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing/ grpc_testing/proto2.proto grpc_testing/proto2_ext.proto grpc_testing/proto2_ext2.proto grpc_testing/test.proto + +// Note: grpc_testingv3/testv3.pb.go is not re-generated because it was +// intentionally generated by an older version of protoc-gen-go. + package reflection import ( @@ -46,17 +36,20 @@ import ( "google.golang.org/grpc" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" + pbv3 "google.golang.org/grpc/reflection/grpc_testingv3" ) var ( s = &serverReflectionServer{} // fileDescriptor of each test proto file. fdTest *dpb.FileDescriptorProto + fdTestv3 *dpb.FileDescriptorProto fdProto2 *dpb.FileDescriptorProto fdProto2Ext *dpb.FileDescriptorProto fdProto2Ext2 *dpb.FileDescriptorProto // fileDescriptor marshalled. fdTestByte []byte + fdTestv3Byte []byte fdProto2Byte []byte fdProto2ExtByte []byte fdProto2Ext2Byte []byte @@ -80,6 +73,7 @@ func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { func init() { fdTest, fdTestByte = loadFileDesc("test.proto") + fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") fdProto2, fdProto2Byte = loadFileDesc("proto2.proto") fdProto2Ext, fdProto2ExtByte = loadFileDesc("proto2_ext.proto") fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("proto2_ext2.proto") @@ -178,6 +172,16 @@ func (s *server) StreamingSearch(stream pb.SearchService_StreamingSearchServer) return nil } +type serverV3 struct{} + +func (s *serverV3) Search(ctx context.Context, in *pbv3.SearchRequestV3) (*pbv3.SearchResponseV3, error) { + return &pbv3.SearchResponseV3{}, nil +} + +func (s *serverV3) StreamingSearch(stream pbv3.SearchServiceV3_StreamingSearchServer) error { + return nil +} + func TestReflectionEnd2end(t *testing.T) { // Start server. lis, err := net.Listen("tcp", "localhost:0") @@ -186,6 +190,7 @@ func TestReflectionEnd2end(t *testing.T) { } s := grpc.NewServer() pb.RegisterSearchServiceServer(s, &server{}) + pbv3.RegisterSearchServiceV3Server(s, &serverV3{}) // Register reflection service on s. 
Register(s) go s.Serve(lis) @@ -198,7 +203,7 @@ func TestReflectionEnd2end(t *testing.T) { defer conn.Close() c := rpb.NewServerReflectionClient(conn) - stream, err := c.ServerReflectionInfo(context.Background()) + stream, err := c.ServerReflectionInfo(context.Background(), grpc.FailFast(false)) if err != nil { t.Fatalf("cannot get ServerReflectionInfo: %v", err) } @@ -286,6 +291,11 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testing.SearchService.StreamingSearch", fdTestByte}, {"grpc.testing.SearchResponse", fdTestByte}, {"grpc.testing.ToBeExtended", fdProto2Byte}, + // Test support package v3. + {"grpc.testingv3.SearchServiceV3", fdTestv3Byte}, + {"grpc.testingv3.SearchServiceV3.Search", fdTestv3Byte}, + {"grpc.testingv3.SearchServiceV3.StreamingSearch", fdTestv3Byte}, + {"grpc.testingv3.SearchResponseV3", fdTestv3Byte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ @@ -484,7 +494,11 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service - want := []string{"grpc.testing.SearchService", "grpc.reflection.v1alpha.ServerReflection"} + want := []string{ + "grpc.testingv3.SearchServiceV3", + "grpc.testing.SearchService", + "grpc.reflection.v1alpha.ServerReflection", + } // Compare service names in response with want. if len(services) != len(want) { t.Errorf("= %v, want service names: %v", services, want) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000000..f31bbb60fd58 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,379 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("missing address") +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{freq: defaultFreq} +} + +type dnsBuilder struct { + // frequency of polling the DNS server. 
+ freq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint) + if err != nil { + return nil, err + } + + // IP address. + if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.freq, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + } + result, sc := d.lookup() + // Next lookup should happen after an interval defined by d.freq. 
+		d.t.Reset(d.freq)
+		d.cc.NewServiceConfig(string(sc))
+		d.cc.NewAddress(result)
+	}
+}
+
+func (d *dnsResolver) lookupSRV() []resolver.Address {
+	var newAddrs []resolver.Address
+	_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, s := range srvs {
+		lbAddrs, err := lookupHost(d.ctx, s.Target)
+		if err != nil {
+			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+			continue
+		}
+		for _, a := range lbAddrs {
+			a, ok := formatIP(a)
+			if !ok {
+				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+				continue
+			}
+			addr := a + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+		}
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookupTXT() string {
+	ss, err := lookupTXT(d.ctx, d.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+		return ""
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
+		return ""
+	}
+	return strings.TrimPrefix(res, txtAttribute)
+}
+
+func (d *dnsResolver) lookupHost() []resolver.Address {
+	var newAddrs []resolver.Address
+	addrs, err := lookupHost(d.ctx, d.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, a := range addrs {
+		a, ok := formatIP(a)
+		if !ok {
+			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			continue
+		}
+		addr := a + ":" + d.port
+		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookup() ([]resolver.Address, string) {
+	var newAddrs []resolver.Address
+	newAddrs = d.lookupSRV()
+	// Support fallback to non-balancer address.
+	newAddrs = append(newAddrs, d.lookupHost()...)
+	sc := d.lookupTXT()
+	return newAddrs, canaryingSC(sc)
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + if r.Intn(100)+1 > *a { + return false + } + return true +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go new file mode 100644 index 000000000000..590791168230 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver_test.go @@ -0,0 +1,898 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "fmt" + "net" + "os" + "reflect" + "sync" + "testing" + "time" + + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/test/leakcheck" +) + +func TestMain(m *testing.M) { + cleanup := replaceNetFunc() + code := m.Run() + cleanup() + os.Exit(code) +} + +const ( + txtBytesLimit = 255 +) + +type testClientConn struct { + target string + m1 sync.Mutex + addrs []resolver.Address + a int + m2 sync.Mutex + sc string + s int +} + +func (t *testClientConn) NewAddress(addresses []resolver.Address) { + t.m1.Lock() + defer t.m1.Unlock() + t.addrs = addresses + t.a++ +} + +func (t *testClientConn) getAddress() ([]resolver.Address, int) { + t.m1.Lock() + defer t.m1.Unlock() + return t.addrs, t.a +} + +func (t *testClientConn) NewServiceConfig(serviceConfig string) { + t.m2.Lock() + defer t.m2.Unlock() + t.sc = serviceConfig + t.s++ +} + +func (t *testClientConn) getSc() (string, int) { + t.m2.Lock() + defer t.m2.Unlock() + return t.sc, t.s +} + +var hostLookupTbl = struct { + sync.Mutex + tbl map[string][]string +}{ + tbl: map[string][]string{ + "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, + "ipv4.single.fake": {"1.2.3.4"}, + "srv.ipv4.single.fake": {"2.4.6.8"}, + "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, + "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, + "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, + }, +} + +func hostLookup(host string) ([]string, error) { + hostLookupTbl.Lock() + defer hostLookupTbl.Unlock() + if addrs, cnt := hostLookupTbl.tbl[host]; cnt { + return addrs, nil + } + return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host) +} + +var srvLookupTbl = struct { + sync.Mutex + tbl map[string][]*net.SRV +}{ + tbl: map[string][]*net.SRV{ + "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, + "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, + }, +} + +func srvLookup(service, proto, name string) (string, []*net.SRV, error) { + cname := "_" + service + "._" + proto + "." + name + srvLookupTbl.Lock() + defer srvLookupTbl.Unlock() + if srvs, cnt := srvLookupTbl.tbl[cname]; cnt { + return cname, srvs, nil + } + return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname) +} + +// div divides a byte slice into a slice of strings, each of which is of maximum +// 255 bytes length, which is the length limit per TXT record in DNS. +func div(b []byte) []string { + var r []string + for i := 0; i < len(b); i += txtBytesLimit { + if i+txtBytesLimit > len(b) { + r = append(r, string(b[i:])) + } else { + r = append(r, string(b[i:i+txtBytesLimit])) + } + } + return r +} + +// scfs contains an array of service config file string in JSON format. +// Notes about the scfs contents and usage: +// scfs contains 4 service config file JSON strings for testing. Inside each +// service config file, there are multiple choices. scfs[0:3] each contains 5 +// choices, and first 3 choices are nonmatching choices based on canarying rule, +// while the last two are matched choices. scfs[3] only contains 3 choices, and +// all of them are nonmatching based on canarying rule. 
For each of scfs[0:3], +// the eventually returned service config, which is from the first of the two +// matched choices, is stored in the corresponding scs element (e.g. +// scfs[0]->scs[0]). scfs and scs elements are used in pair to test the dns +// resolver functionality, with scfs as the input and scs used for validation of +// the output. For scfs[3], it corresponds to empty service config, since there +// isn't a matched choice. +var ( + scfs = []string{ + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "methodConfig": [ + { + "name": [ + { + "method": "bar" + } + ], + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + } + }, + { + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true, + "timeout": "1s", + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + } + }, + { + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientLanguage": [ + "GO" + ], + "percentage": 100, + "serviceConfig": { + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo" + } + ], + "waitForReady": true, + "timeout": "1s" + }, + { + "name": [ + { + "service": "bar" + } + ], + "waitForReady": false + } + ] + } + }, + { + "serviceConfig": { + 
"loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true + } + ] + } + } +]`, + `[ + { + "clientLanguage": [ + "CPP", + "JAVA" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "percentage": 0, + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + }, + { + "clientHostName": [ + "localhost" + ], + "serviceConfig": { + "loadBalancingPolicy": "grpclb", + "methodConfig": [ + { + "name": [ + { + "service": "all" + } + ], + "timeout": "1s" + } + ] + } + } +]`, + } +) + +// scs contains an array of service config string in JSON format. +var ( + scs = []string{ + `{ + "methodConfig": [ + { + "name": [ + { + "method": "bar" + } + ], + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + }`, + `{ + "methodConfig": [ + { + "name": [ + { + "service": "foo", + "method": "bar" + } + ], + "waitForReady": true, + "timeout": "1s", + "maxRequestMessageBytes": 1024, + "maxResponseMessageBytes": 1024 + } + ] + }`, + `{ + "loadBalancingPolicy": "round_robin", + "methodConfig": [ + { + "name": [ + { + "service": "foo" + } + ], + "waitForReady": true, + "timeout": "1s" + }, + { + "name": [ + { + "service": "bar" + } + ], + "waitForReady": false + } + ] + }`, + } +) + +// scLookupTbl is a set, which contains targets that have service config. Target +// not in this set should not have service config. +var scLookupTbl = map[string]bool{ + "foo.bar.com": true, + "srv.ipv4.single.fake": true, + "srv.ipv4.multi.fake": true, + "no.attribute": true, +} + +// generateSCF generates a slice of strings (aggregately representing a single +// service config file) for the input name, which mocks the result from a real +// DNS TXT record lookup. +func generateSCF(name string) []string { + var b []byte + switch name { + case "foo.bar.com": + b = []byte(scfs[0]) + case "srv.ipv4.single.fake": + b = []byte(scfs[1]) + case "srv.ipv4.multi.fake": + b = []byte(scfs[2]) + default: + b = []byte(scfs[3]) + } + if name == "no.attribute" { + return div(b) + } + return div(append([]byte(txtAttribute), b...)) +} + +// generateSC returns a service config string in JSON format for the input name. 
+func generateSC(name string) string { + _, cnt := scLookupTbl[name] + if !cnt || name == "no.attribute" { + return "" + } + switch name { + case "foo.bar.com": + return scs[0] + case "srv.ipv4.single.fake": + return scs[1] + case "srv.ipv4.multi.fake": + return scs[2] + default: + return "" + } +} + +var txtLookupTbl = struct { + sync.Mutex + tbl map[string][]string +}{ + tbl: map[string][]string{ + "foo.bar.com": generateSCF("foo.bar.com"), + "srv.ipv4.single.fake": generateSCF("srv.ipv4.single.fake"), + "srv.ipv4.multi.fake": generateSCF("srv.ipv4.multi.fake"), + "srv.ipv6.single.fake": generateSCF("srv.ipv6.single.fake"), + "srv.ipv6.multi.fake": generateSCF("srv.ipv6.multi.fake"), + "no.attribute": generateSCF("no.attribute"), + }, +} + +func txtLookup(host string) ([]string, error) { + txtLookupTbl.Lock() + defer txtLookupTbl.Unlock() + if scs, cnt := txtLookupTbl.tbl[host]; cnt { + return scs, nil + } + return nil, fmt.Errorf("failed to lookup TXT:%s resolution in txtLookupTbl", host) +} + +func TestResolve(t *testing.T) { + testDNSResolver(t) + testDNSResolveNow(t) + testIPResolver(t) +} + +func testDNSResolver(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + target string + addrWant []resolver.Address + scWant string + }{ + { + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + generateSC("foo.bar.com"), + }, + { + "foo.bar.com:1234", + []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, + generateSC("foo.bar.com"), + }, + { + "srv.ipv4.single.fake", + []resolver.Address{{Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.single.fake"}, {Addr: "2.4.6.8" + colonDefaultPort}}, + generateSC("srv.ipv4.single.fake"), + }, + { + "srv.ipv4.multi.fake", + []resolver.Address{ + {Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + {Addr: "5.6.7.8:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + {Addr: "9.10.11.12:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, + }, + generateSC("srv.ipv4.multi.fake"), + }, + { + "srv.ipv6.single.fake", + []resolver.Address{{Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.single.fake"}}, + generateSC("srv.ipv6.single.fake"), + }, + { + "srv.ipv6.multi.fake", + []resolver.Address{ + {Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1002]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1003]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, + }, + generateSC("srv.ipv6.multi.fake"), + }, + { + "no.attribute", + nil, + generateSC("no.attribute"), + }, + } + + for _, a := range tests { + b := NewBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + var sc string + for { + sc, cnt = cc.getSc() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrWant, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrWant) + } + if !reflect.DeepEqual(a.scWant, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) + } + r.Close() + } +} + +func 
mutateTbl(target string) func() { + hostLookupTbl.Lock() + oldHostTblEntry := hostLookupTbl.tbl[target] + hostLookupTbl.tbl[target] = hostLookupTbl.tbl[target][:len(oldHostTblEntry)-1] + hostLookupTbl.Unlock() + txtLookupTbl.Lock() + oldTxtTblEntry := txtLookupTbl.tbl[target] + txtLookupTbl.tbl[target] = []string{""} + txtLookupTbl.Unlock() + + return func() { + hostLookupTbl.Lock() + hostLookupTbl.tbl[target] = oldHostTblEntry + hostLookupTbl.Unlock() + txtLookupTbl.Lock() + txtLookupTbl.tbl[target] = oldTxtTblEntry + txtLookupTbl.Unlock() + } +} + +func testDNSResolveNow(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + target string + addrWant []resolver.Address + addrNext []resolver.Address + scWant string + scNext string + }{ + { + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}}, + generateSC("foo.bar.com"), + "", + }, + } + + for _, a := range tests { + b := NewBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + var sc string + for { + sc, cnt = cc.getSc() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrWant, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrWant) + } + if !reflect.DeepEqual(a.scWant, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) + } + revertTbl := mutateTbl(a.target) + r.ResolveNow(resolver.ResolveNowOption{}) + for { + addrs, cnt = cc.getAddress() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + for { + sc, cnt = cc.getSc() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(a.addrNext, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, addrs, a.addrNext) + } + if !reflect.DeepEqual(a.scNext, sc) { + t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scNext) + } + revertTbl() + r.Close() + } +} + +const colonDefaultPort = ":" + defaultPort + +func testIPResolver(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + target string + want []resolver.Address + }{ + {"127.0.0.1", []resolver.Address{{Addr: "127.0.0.1" + colonDefaultPort}}}, + {"127.0.0.1:12345", []resolver.Address{{Addr: "127.0.0.1:12345"}}}, + {"::1", []resolver.Address{{Addr: "[::1]" + colonDefaultPort}}}, + {"[::1]:12345", []resolver.Address{{Addr: "[::1]:12345"}}}, + {"[::1]:", []resolver.Address{{Addr: "[::1]:443"}}}, + {"2001:db8:85a3::8a2e:370:7334", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, + {"[2001:db8:85a3::8a2e:370:7334]:12345", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, + {"[2001:db8::1]:http", []resolver.Address{{Addr: "[2001:db8::1]:http"}}}, + // TODO(yuxuanli): zone support? 
+ } + + for _, v := range tests { + b := NewBuilder() + cc := &testClientConn{target: v.target} + r, err := b.Build(resolver.Target{Endpoint: v.target}, cc, resolver.BuildOption{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var addrs []resolver.Address + var cnt int + for { + addrs, cnt = cc.getAddress() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(v.want, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", v.target, addrs, v.want) + } + r.ResolveNow(resolver.ResolveNowOption{}) + for { + addrs, cnt = cc.getAddress() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + if !reflect.DeepEqual(v.want, addrs) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", v.target, addrs, v.want) + } + r.Close() + } +} + +func TestResolveFunc(t *testing.T) { + defer leakcheck.Check(t) + tests := []struct { + addr string + want error + }{ + // TODO(yuxuanli): More false cases? + {"www.google.com", nil}, + {"foo.bar:12345", nil}, + {"127.0.0.1", nil}, + {"127.0.0.1:12345", nil}, + {"[::1]:80", nil}, + {"[2001:db8:a0b:12f0::1]:21", nil}, + {":80", nil}, + {"127.0.0...1:12345", nil}, + {"[fe80::1%lo0]:80", nil}, + {"golang.org:http", nil}, + {"[2001:db8::1]:http", nil}, + {":", nil}, + {"", errMissingAddr}, + {"[2001:db8:a0b:12f0::1", errForInvalidTarget}, + } + + b := NewBuilder() + for _, v := range tests { + cc := &testClientConn{target: v.addr} + r, err := b.Build(resolver.Target{Endpoint: v.addr}, cc, resolver.BuildOption{}) + if err == nil { + r.Close() + } + if !reflect.DeepEqual(err, v.want) { + t.Errorf("Build(%q, cc, resolver.BuildOption{}) = %v, want %v", v.addr, err, v.want) + } + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 000000000000..b466bc8f6d45 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17_test.go b/vendor/google.golang.org/grpc/resolver/dns/go17_test.go new file mode 100644 index 000000000000..07fdcb03f88a --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17_test.go @@ -0,0 +1,52 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "fmt" + "net" + + "golang.org/x/net/context" +) + +var ( + errForInvalidTarget = fmt.Errorf("invalid target address [2001:db8:a0b:12f0::1, error info: missing ']' in address [2001:db8:a0b:12f0::1:443") +) + +func replaceNetFunc() func() { + oldLookupHost := lookupHost + oldLookupSRV := lookupSRV + oldLookupTXT := lookupTXT + lookupHost = func(ctx context.Context, host string) ([]string, error) { + return hostLookup(host) + } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) + } + lookupTXT = func(ctx context.Context, host string) ([]string, error) { + return txtLookup(host) + } + return func() { + lookupHost = oldLookupHost + lookupSRV = oldLookupSRV + lookupTXT = oldLookupTXT + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 000000000000..fa34f14cad48 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18_test.go b/vendor/google.golang.org/grpc/resolver/dns/go18_test.go new file mode 100644 index 000000000000..8e016709c1d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18_test.go @@ -0,0 +1,51 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package dns
+
+import (
+	"context"
+	"fmt"
+	"net"
+)
+
+var (
+	errForInvalidTarget = fmt.Errorf("invalid target address [2001:db8:a0b:12f0::1, error info: address [2001:db8:a0b:12f0::1:443: missing ']' in address")
+)
+
+func replaceNetFunc() func() {
+	oldLookupHost := lookupHost
+	oldLookupSRV := lookupSRV
+	oldLookupTXT := lookupTXT
+	lookupHost = func(ctx context.Context, host string) ([]string, error) {
+		return hostLookup(host)
+	}
+	lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+		return srvLookup(service, proto, name)
+	}
+	lookupTXT = func(ctx context.Context, host string) ([]string, error) {
+		return txtLookup(host)
+	}
+	return func() {
+		lookupHost = oldLookupHost
+		lookupSRV = oldLookupSRV
+		lookupTXT = oldLookupTXT
+	}
+}
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
new file mode 100644
index 000000000000..de2fb25b55b5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package manual contains a resolver for testing purposes only.
+package manual
+
+import (
+	"strconv"
+	"time"
+
+	"google.golang.org/grpc/resolver"
+)
+
+// NewBuilderWithScheme creates a new test resolver builder with the given scheme.
+func NewBuilderWithScheme(scheme string) *Resolver {
+	return &Resolver{
+		scheme: scheme,
+	}
+}
+
+// Resolver is also a resolver builder.
+// Its Build() function always returns itself.
+type Resolver struct {
+	scheme string
+
+	// Fields actually belong to the resolver.
+	cc resolver.ClientConn
+}
+
+// Build returns itself for Resolver, because it's both a builder and a resolver.
+func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	r.cc = cc
+	return r, nil
+}
+
+// Scheme returns the test scheme.
+func (r *Resolver) Scheme() string {
+	return r.scheme
+}
+
+// ResolveNow is a noop for Resolver.
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+// Close is a noop for Resolver.
+func (*Resolver) Close() {}
+
+// NewAddress calls cc.NewAddress.
+func (r *Resolver) NewAddress(addrs []resolver.Address) {
+	r.cc.NewAddress(addrs)
+}
+
+// NewServiceConfig calls cc.NewServiceConfig.
+func (r *Resolver) NewServiceConfig(sc string) {
+	r.cc.NewServiceConfig(sc)
+}
+
+// GenerateAndRegisterManualResolver generates a random scheme and a Resolver
+// with it. It also registers this Resolver.
+// It returns the Resolver and a cleanup function to unregister it.
+func GenerateAndRegisterManualResolver() (*Resolver, func()) { + scheme := strconv.FormatInt(time.Now().UnixNano(), 36) + r := NewBuilderWithScheme(scheme) + resolver.Register(r) + return r, func() { resolver.UnregisterForTesting(scheme) } +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 000000000000..4d4420aa4168 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. It's for gRPC internal +// test only. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 000000000000..49307e8fe9e9 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme string +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. +// b.Scheme will be used as the scheme registered with this builder. 
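The manual and passthrough resolvers above are intended for tests. As a rough sketch of how a test could drive the manual resolver once it is vendored, something like the following would work; the backend address and the "ignored" endpoint are made-up values, and NewAddress may only be called after gRPC has invoked Build (i.e. after dialing):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	// Pick a random scheme, register the manual resolver, and get a cleanup
	// function that unregisters it again.
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	// Any dial target using the generated scheme is routed to r.
	fmt.Println("dial target:", r.Scheme()+":///ignored")

	// After grpc.Dial has caused Build to run, a test can push addresses:
	//   r.NewAddress([]resolver.Address{{Addr: "127.0.0.1:50051"}})
	_ = resolver.Address{Addr: "127.0.0.1:50051"} // hypothetical backend
}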
+func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// If no builder is register with the scheme, the default scheme will +// be used. +// If the default scheme is not modified, "dns" will be the default +// scheme, and the preinstalled dns resolver will be used. +// If the default scheme is modified, and a resolver is registered with +// the scheme, that resolver will be returned. +// If the default scheme is modified, and no resolver is registered with +// the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + if b, ok := m[defaultScheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. +// The default default scheme is "dns". +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // It's the name of the grpc load balancer, which will be used for authentication. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name again. + // It's just a hint, resolver can ignore this if it's not necessary. 
+ ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000000..7d53964d094d --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,139 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns "", s instead. +func split2(s, sep string) (string, string) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", s + } + return spl[0], spl[1] +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +func parseTarget(target string) (ret resolver.Target) { + ret.Scheme, ret.Endpoint = split2(target, "://") + ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/") + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme. It then builds the resolver and starts the +// monitoring goroutine for it. +// +// This function could return nil, nil, in tests for old behaviors. +// TODO(bar) never return nil, nil when DNS becomes the default resolver. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + target := parseTarget(cc.target) + grpclog.Infof("dialing to target with scheme: %q", target.Scheme) + + rb := resolver.Get(target.Scheme) + if rb == nil { + // TODO(bar) return error when DNS becomes the default (implemented and + // registered by DNS package). + grpclog.Infof("could not get resolver for scheme: %q", target.Scheme) + return nil, nil + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + done: make(chan struct{}), + } + + var err error + ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{}) + if err != nil { + return nil, err + } + go ccr.watcher() + return ccr, nil +} + +// watcher processes address updates and service config updates sequencially. +// Otherwise, we need to resolve possible races between address and service +// config (e.g. they specify different balancer types). 
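The Builder and Resolver interfaces above are the whole contract a custom name resolver has to satisfy. The sketch below registers a hypothetical "static" scheme that always reports a fixed address list; the scheme name and the addresses are invented for illustration and are not part of the vendored code:

package main

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder returns a fixed set of backend addresses for the made-up
// "static" scheme; it exists only to illustrate the Builder/Resolver contract.
type staticBuilder struct {
	addrs []string
}

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.ResolveNow(resolver.ResolveNowOption{})
	return r, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}

// ResolveNow pushes the (unchanging) address list to the ClientConn.
func (r *staticResolver) ResolveNow(resolver.ResolveNowOption) {
	out := make([]resolver.Address, 0, len(r.addrs))
	for _, a := range r.addrs {
		out = append(out, resolver.Address{Addr: a, Type: resolver.Backend})
	}
	r.cc.NewAddress(out)
}

func (r *staticResolver) Close() {}

func main() {
	// After registration, a dial target such as "static:///whatever" would be
	// handled by this builder.
	resolver.Register(&staticBuilder{addrs: []string{"10.0.0.1:443", "10.0.0.2:443"}})
}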
+func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs) + // TODO(bar switching) this should never be nil. Pickfirst should be default. + if ccr.cc.balancerWrapper != nil { + // TODO(bar switching) create balancer if it's nil? + ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil) + } + case sc := <-ccr.scCh: + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + case <-ccr.done: + return + } + } +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + close(ccr.done) +} + +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + select { + case <-ccr.addrCh: + default: + } + ccr.addrCh <- addrs +} + +// NewServiceConfig is called by the resolver implemenetion to send service +// configs to gPRC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go new file mode 100644 index 000000000000..024942301d03 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "testing" + + "google.golang.org/grpc/resolver" +) + +func TestParseTarget(t *testing.T) { + for _, test := range []resolver.Target{ + {"", "", ""}, + {"a", "", ""}, + {"", "a", ""}, + {"", "", "a"}, + {"a", "b", ""}, + {"a", "", "b"}, + {"", "a", "b"}, + {"a", "b", "c"}, + {"dns", "a.server.com", "google.com"}, + {"dns", "a.server.com", "google.com"}, + {"dns", "a.server.com", "google.com/?a=b"}, + } { + str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint + got := parseTarget(str) + if got != test { + t.Errorf("parseTarget(%q) = %v, want %v", str, got, test) + } + } +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 34e1ad03b978..188a75fff946 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,15 +21,18 @@ package grpc import ( "bytes" "compress/gzip" + stdctx "context" "encoding/binary" "io" "io/ioutil" "math" "os" + "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -60,16 +48,25 @@ type Compressor interface { Type() string } -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} +type gzipCompressor struct { + pool sync.Pool } -type gzipCompressor struct { +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) if _, err := z.Write(p); err != nil { return err } @@ -89,6 +86,7 @@ type Decompressor interface { } type gzipDecompressor struct { + pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. @@ -97,11 +95,26 @@ func NewGZIPDecompressor() Decompressor { } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } } - defer z.Close() + + defer func() { + z.Close() + d.pool.Put(z) + }() return ioutil.ReadAll(z) } @@ -111,14 +124,19 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. 
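The compressor changes above switch NewGZIPCompressor and NewGZIPDecompressor to sync.Pool-backed gzip.Writer/gzip.Reader instances so a writer is not reallocated for every message. A standalone sketch of the same pooling pattern (not the gRPC type itself) looks like this:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"sync"
)

// writerPool reuses *gzip.Writer values across compressions, mirroring the
// pooling that the vendored NewGZIPCompressor now does internally.
var writerPool = sync.Pool{
	New: func() interface{} { return gzip.NewWriter(ioutil.Discard) },
}

func compress(dst *bytes.Buffer, p []byte) error {
	z := writerPool.Get().(*gzip.Writer)
	defer writerPool.Put(z)
	z.Reset(dst) // point the pooled writer at the new destination
	if _, err := z.Write(p); err != nil {
		return err
	}
	return z.Close()
}

func main() {
	var buf bytes.Buffer
	if err := compress(&buf, []byte("hello, gzip pool")); err != nil {
		panic(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}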
type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - peer *peer.Peer - traceInfo traceInfo // in trace.go + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials } -var defaultCallInfo = callInfo{failFast: true} +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} // CallOption configures a Call before it starts or extracts information from // a Call after it completes. @@ -132,6 +150,14 @@ type CallOption interface { after(*callInfo) } +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + type beforeCall func(c *callInfo) error func (o beforeCall) before(c *callInfo) error { return o(c) } @@ -173,7 +199,8 @@ func Peer(peer *peer.Peer) CallOption { // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will retry // the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md. Note: failFast is default to true. +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// Note: failFast is default to true. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -181,6 +208,31 @@ func FailFast(failFast bool) CallOption { }) } +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -197,7 +249,7 @@ type parser struct { r io.Reader // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html. + // at https://grpc.io/docs/guides/wire.html. header [5]byte } @@ -214,8 +266,8 @@ type parser struct { // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. 
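MaxCallRecvMsgSize, MaxCallSendMsgSize, FailFast and PerRPCCredentials above are all per-call options. The snippet below only assembles such options; the staticToken credential and the SayHello stub mentioned in the final comment are hypothetical, and the size limits are examples rather than recommendations:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// staticToken is a toy PerRPCCredentials implementation that attaches a fixed
// bearer token to every request; it is illustrative only.
type staticToken string

func (t staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + string(t)}, nil
}

func (staticToken) RequireTransportSecurity() bool { return false }

func main() {
	// Per-call options: raise the receive limit, cap sends, wait for a ready
	// connection instead of failing fast, and attach per-RPC credentials.
	opts := []grpc.CallOption{
		grpc.MaxCallRecvMsgSize(16 * 1024 * 1024),
		grpc.MaxCallSendMsgSize(4 * 1024 * 1024),
		grpc.FailFast(false),
		grpc.PerRPCCredentials(staticToken("example-token")),
	}
	fmt.Println("configured", len(opts), "call options")
	// They would be passed as trailing arguments to a generated stub method,
	// e.g. client.SayHello(ctx, req, opts...).
}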
-func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } @@ -225,13 +277,13 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro if length == 0 { return pf, nil, nil } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { + if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -240,19 +292,20 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro return pf, msg, nil } -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) { - var ( - b []byte - length uint +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { + var b []byte + const ( + payloadLen = 1 + sizeLen = 4 ) + if msg != nil { var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. 
b, err = c.Marshal(msg) if err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } if outPayload != nil { outPayload.Payload = msg @@ -262,39 +315,28 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl } if cp != nil { if err := cp.Do(cbuf, b); err != nil { - return nil, err + return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } b = cbuf.Bytes() } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) } - const ( - payloadLen = 1 - sizeLen = 4 - ) - - var buf = make([]byte, payloadLen+sizeLen+len(b)) + if uint(len(b)) > math.MaxUint32 { + return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } - // Write payload format + bufHeader := make([]byte, payloadLen+sizeLen) if cp == nil { - buf[0] = byte(compressionNone) + bufHeader[0] = byte(compressionNone) } else { - buf[0] = byte(compressionMade) + bufHeader[0] = byte(compressionMade) } // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) if outPayload != nil { - outPayload.WireLength = len(buf) + outPayload.WireLength = payloadLen + sizeLen + len(b) } - - return buf, nil + return bufHeader, b, nil } func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { @@ -310,8 +352,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er return nil } -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error { - pf, d, err := p.recvMsg(maxMsgSize) +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return err } @@ -327,10 +369,10 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } - if len(d) > maxMsgSize { + if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", len(d), maxReceiveMessageSize) } if err := c.Unmarshal(d, m); err != nil { return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) @@ -346,14 +388,15 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ } type rpcInfo struct { + failfast bool bytesSent bool bytesReceived bool } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { @@ -363,41 +406,12 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { if ss, ok := rpcInfoFromContext(ctx); ok { - *ss = s + ss.bytesReceived = s.bytesReceived + ss.bytesSent = s.bytesSent } return } -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -// -// Deprecated; use status.FromError and Code method instead. -func Code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -// -// Deprecated; use status.FromError and Message method instead. -func ErrorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -// -// Deprecated; use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { - return status.Errorf(c, format, a...) -} - // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { if _, ok := status.FromError(err); ok { @@ -407,12 +421,12 @@ func toRPCErr(err error) error { case transport.StreamError: return status.Error(e.Code, e.Desc) case transport.ConnectionError: - return status.Error(codes.Internal, e.Desc) + return status.Error(codes.Unavailable, e.Desc) default: switch err { - case context.DeadlineExceeded: + case context.DeadlineExceeded, stdctx.DeadlineExceeded: return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + case context.Canceled, stdctx.Canceled: return status.Error(codes.Canceled, err.Error()) case ErrClientConnClosing: return status.Error(codes.FailedPrecondition, err.Error()) @@ -433,9 +447,9 @@ func convertCode(err error) codes.Code { return codes.FailedPrecondition case os.ErrInvalid: return codes.InvalidArgument - case context.Canceled: + case context.Canceled, stdctx.Canceled: return codes.Canceled - case context.DeadlineExceeded: + case context.DeadlineExceeded, stdctx.DeadlineExceeded: return codes.DeadlineExceeded } switch { @@ -449,6 +463,36 @@ func convertCode(err error) codes.Code { return codes.Unknown } +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated; use status.FromError and Code method instead. 
+func Code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated; use status.FromError and Message method instead. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated; use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + // MethodConfig defines the configuration recommended by the service providers for a // particular method. // This is EXPERIMENTAL and subject to change. @@ -456,24 +500,22 @@ type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. - WaitForReady bool + WaitForReady *bool // Timeout is the default timeout for RPCs sent to this method. The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout time.Duration + Timeout *time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minumum of the value specified here and the value set + // The actual value used is the minimum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. - // TODO: support this. - MaxReqSize uint32 + MaxReqSize *int // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. - // TODO: support this. - MaxRespSize uint32 + MaxRespSize *int } // ServiceConfig is provided by the service provider and contains parameters for how @@ -484,9 +526,38 @@ type ServiceConfig struct { // via grpc.WithBalancer will override this. LB Balancer // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. Methods map[string]MethodConfig } +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +// SupportPackageIsVersion3 is referenced from generated protocol buffer files. +// The latest support package version is 4. +// SupportPackageIsVersion3 is kept for compatibility. 
It will be removed in the +// next support package version update. +const SupportPackageIsVersion3 = true + // SupportPackageIsVersion4 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the grpc package. // @@ -496,6 +567,6 @@ type ServiceConfig struct { const SupportPackageIsVersion4 = true // Version is the current grpc version. -const Version = "1.3.0" +const Version = "1.7.5" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/rpc_util_test.go b/vendor/google.golang.org/grpc/rpc_util_test.go index 8c92b963d3b9..23c471e2e407 100644 --- a/vendor/google.golang.org/grpc/rpc_util_test.go +++ b/vendor/google.golang.org/grpc/rpc_util_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -47,6 +32,16 @@ import ( "google.golang.org/grpc/transport" ) +type fullReader struct { + reader io.Reader +} + +func (f fullReader) Read(p []byte) (int, error) { + return io.ReadFull(f.reader, p) +} + +var _ CallOption = EmptyCallOption{} // ensure EmptyCallOption implements the interface + func TestSimpleParsing(t *testing.T) { bigMsg := bytes.Repeat([]byte{'x'}, 1<<24) for _, test := range []struct { @@ -65,7 +60,7 @@ func TestSimpleParsing(t *testing.T) { // Check that messages with length >= 2^24 are parsed. 
{append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, } { - buf := bytes.NewReader(test.p) + buf := fullReader{bytes.NewReader(test.p)} parser := &parser{r: buf} pt, b, err := parser.recvMsg(math.MaxInt32) if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { @@ -77,7 +72,7 @@ func TestSimpleParsing(t *testing.T) { func TestMultipleParsing(t *testing.T) { // Set a byte stream consists of 3 messages with their headers. p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} - b := bytes.NewReader(p) + b := fullReader{bytes.NewReader(p)} parser := &parser{r: b} wantRecvs := []struct { @@ -109,14 +104,15 @@ func TestEncode(t *testing.T) { msg proto.Message cp Compressor // outputs - b []byte - err error + hdr []byte + data []byte + err error }{ - {nil, nil, []byte{0, 0, 0, 0, 0}, nil}, + {nil, nil, []byte{0, 0, 0, 0, 0}, []byte{}, nil}, } { - b, err := encode(protoCodec{}, test.msg, nil, nil, nil) - if err != test.err || !bytes.Equal(b, test.b) { - t.Fatalf("encode(_, _, %v, _) = %v, %v\nwant %v, %v", test.cp, b, err, test.b, test.err) + hdr, data, err := encode(protoCodec{}, test.msg, nil, nil, nil) + if err != test.err || !bytes.Equal(hdr, test.hdr) || !bytes.Equal(data, test.data) { + t.Fatalf("encode(_, _, %v, _) = %v, %v, %v\nwant %v, %v, %v", test.cp, hdr, data, err, test.hdr, test.data, test.err) } } } @@ -130,7 +126,7 @@ func TestCompress(t *testing.T) { // outputs err error }{ - {make([]byte, 1024), &gzipCompressor{}, &gzipDecompressor{}, nil}, + {make([]byte, 1024), NewGZIPCompressor(), NewGZIPDecompressor(), nil}, } { b := new(bytes.Buffer) if err := test.cp.Do(b, test.data); err != test.err { @@ -153,7 +149,7 @@ func TestToRPCErr(t *testing.T) { errOut error }{ {transport.StreamError{Code: codes.Unknown, Desc: ""}, status.Error(codes.Unknown, "")}, - {transport.ErrConnClosing, status.Error(codes.Internal, transport.ErrConnClosing.Desc)}, + {transport.ErrConnClosing, status.Error(codes.Unavailable, transport.ErrConnClosing.Desc)}, } { err := toRPCErr(test.errIn) if _, ok := status.FromError(err); !ok { @@ -169,8 +165,8 @@ func TestToRPCErr(t *testing.T) { // bytes. func bmEncode(b *testing.B, mSize int) { msg := &perfpb.Buffer{Body: make([]byte, mSize)} - encoded, _ := encode(protoCodec{}, msg, nil, nil, nil) - encodedSz := int64(len(encoded)) + encodeHdr, encodeData, _ := encode(protoCodec{}, msg, nil, nil, nil) + encodedSz := int64(len(encodeHdr) + len(encodeData)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -202,3 +198,40 @@ func BenchmarkEncode512KiB(b *testing.B) { func BenchmarkEncode1MiB(b *testing.B) { bmEncode(b, 1024*1024) } + +// bmCompressor benchmarks a compressor of a Protocol Buffer message containing +// mSize bytes. 
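TestToRPCErr above now expects transport connection errors to map to codes.Unavailable instead of codes.Internal. A hedged client-side sketch of acting on that mapping, where the retry policy itself is an application choice, could look like:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRetryable reports whether an RPC error is one a caller might reasonably
// retry under the new error mapping (connection errors -> Unavailable).
func isRetryable(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		return false
	}
	switch s.Code() {
	case codes.Unavailable, codes.DeadlineExceeded:
		return true
	}
	return false
}

func main() {
	err := status.Error(codes.Unavailable, "transport is closing")
	fmt.Println(isRetryable(err)) // true
}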
+func bmCompressor(b *testing.B, mSize int, cp Compressor) { + payload := make([]byte, mSize) + cBuf := bytes.NewBuffer(make([]byte, mSize)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + cp.Do(cBuf, payload) + cBuf.Reset() + } +} + +func BenchmarkGZIPCompressor1B(b *testing.B) { + bmCompressor(b, 1, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor1KiB(b *testing.B) { + bmCompressor(b, 1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor8KiB(b *testing.B) { + bmCompressor(b, 8*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor64KiB(b *testing.B) { + bmCompressor(b, 64*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor512KiB(b *testing.B) { + bmCompressor(b, 512*1024, NewGZIPCompressor()) +} + +func BenchmarkGZIPCompressor1MiB(b *testing.B) { + bmCompressor(b, 1024*1024, NewGZIPCompressor()) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index b15f71c6c182..787665dfeb31 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -38,6 +23,7 @@ import ( "errors" "fmt" "io" + "math" "net" "net/http" "reflect" @@ -61,6 +47,11 @@ import ( "google.golang.org/grpc/transport" ) +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -96,6 +87,7 @@ type Server struct { mu sync.Mutex // guards following lis map[net.Listener]bool conns map[io.Closer]bool + serve bool drain bool ctx context.Context cancel context.CancelFunc @@ -107,27 +99,69 @@ type Server struct { } type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration } -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, +} -// A ServerOption sets options. +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. 
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { return func(o *options) { @@ -163,11 +197,25 @@ func RPCDecompressor(dc Decompressor) ServerOption { } } -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. -// If this is not set, gRPC uses the default 4MB. +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { return func(o *options) { - o.maxMsgSize = m + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m } } @@ -192,7 +240,7 @@ func Creds(c credentials.TransportCredentials) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { return func(o *options) { if o.unaryInt != nil { - panic("The unary server interceptor has been set.") + panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i } @@ -203,7 +251,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption { return func(o *options) { if o.streamInt != nil { - panic("The stream server interceptor has been set.") + panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i } @@ -214,7 +262,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption { return func(o *options) { if o.inTapHandle != nil { - panic("The tap handle has been set.") + panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h } @@ -229,10 +277,10 @@ func StatsHandler(h stats.Handler) ServerOption { // UnknownServiceHandler returns a ServerOption that allows for adding a custom // unknown service handler. The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the the "unimplemented" gRPC +// handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. // The handling function has full access to the Context of the request and the -// stream, and the invocation passes through interceptors. +// stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return func(o *options) { o.unknownStreamDesc = &StreamDesc{ @@ -245,11 +293,20 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { } } +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +func ConnectionTimeout(d time.Duration) ServerOption { + return func(o *options) { + o.connectionTimeout = d + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
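Several new ServerOptions appear above (MaxRecvMsgSize, MaxSendMsgSize, ConnectionTimeout, InitialWindowSize, InitialConnWindowSize, alongside the existing KeepaliveParams). A sketch of constructing a server with them, where every limit and timeout is an example value rather than a recommendation:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	srv := grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024),       // per-message receive cap
		grpc.MaxSendMsgSize(8*1024*1024),       // per-message send cap
		grpc.ConnectionTimeout(30*time.Second), // handshake deadline for new conns
		grpc.InitialWindowSize(1<<20),          // per-stream flow-control window
		grpc.InitialConnWindowSize(1<<20),      // per-connection flow-control window
		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 2 * time.Minute}),
	)
	defer srv.Stop()

	// Services would be registered here, before calling srv.Serve(lis).
	_ = lis
}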
func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize + opts := defaultServerOptions for _, o := range opt { o(&opts) } @@ -288,8 +345,8 @@ func (s *Server) errorf(format string, a ...interface{}) { } } -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before // invoking Serve. func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { ht := reflect.TypeOf(sd.HandlerType).Elem() @@ -304,6 +361,9 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } if _, ok := s.m[sd.ServiceName]; ok { grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } @@ -334,7 +394,7 @@ type MethodInfo struct { IsServerStream bool } -// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. @@ -392,6 +452,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") + s.serve = true if s.lis == nil { s.mu.Unlock() lis.Close() @@ -427,10 +488,12 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("Accept error: %v; retrying in %v", err, tempDelay) s.mu.Unlock() + timer := time.NewTimer(tempDelay) select { - case <-time.After(tempDelay): + case <-timer.C: case <-s.ctx.Done(): } + timer.Stop() continue } s.mu.Lock() @@ -448,16 +511,18 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn is run in its own goroutine and handles a just-accepted // connection that has not had any I/O performed on it yet. func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandShake returns ErrConnDispatched, keep rawConn open. + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. if err != credentials.ErrConnDispatched { rawConn.Close() } + rawConn.SetDeadline(time.Time{}) return } @@ -470,25 +535,32 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Unlock() if s.opts.useHandlerImpl { + rawConn.SetDeadline(time.Time{}) s.serveUsingHandler(conn) } else { - s.serveHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + rawConn.SetDeadline(time.Time{}) + s.serveStreams(st) } } -// serveHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. 
-// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -496,14 +568,14 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil } if !s.addConn(st) { st.Close() - return + return nil } - s.serveStreams(st) + return st } func (s *Server) serveStreams(st transport.ServerTransport) { @@ -554,6 +626,30 @@ func (s *Server) serveUsingHandler(conn net.Conn) { }) } +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r) if err != nil { @@ -618,18 +714,15 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str if s.opts.statsHandler != nil { outPayload = &stats.OutPayload{} } - p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). 
Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) - } - err = t.Write(stream, p, opts) + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) @@ -644,9 +737,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -654,8 +745,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if trInfo != nil { defer trInfo.tr.Finish() trInfo.firstLine.client = false @@ -672,139 +763,137 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. stream.SetSendCompress(s.opts.cp.Type()) } p := &parser{r: stream} - for { // TODO: delete - pf, req, err := p.recvMsg(s.opts.maxMsgSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) } - return err } + return err + } - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return err - } - if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } - // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), - } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) } - df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) } - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - return Errorf(codes.Internal, err.Error()) - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - return status.Errorf(codes.Internal, "grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - if inPayload != nil { - inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) - sh.HandleRPC(stream.Context(), inPayload) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(convertCode(appErr), appErr.Error()) - appStatus, _ = status.FromError(appErr) - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - return appErr + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. 
+ return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) } if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) } - opts := &transport.Options{ - Last: true, - Delay: false, + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } - return err } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? - return t.WriteStatus(stream, status.New(codes.OK, "")) + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? 
+ return t.WriteStatus(stream, status.New(codes.OK, "")) } func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { @@ -814,9 +903,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp BeginTime: time.Now(), } sh.HandleRPC(stream.Context(), begin) - } - defer func() { - if sh != nil { + defer func() { end := &stats.End{ EndTime: time.Now(), } @@ -824,24 +911,22 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } sh.HandleRPC(stream.Context(), end) - } - }() + }() + } if s.opts.cp != nil { stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - statsHandler: sh, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -913,12 +998,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.InvalidArgument, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -943,7 +1028,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -973,7 +1058,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1011,8 +1096,9 @@ func (s *Server) Stop() { s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. func (s *Server) GracefulStop() { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/server_test.go b/vendor/google.golang.org/grpc/server_test.go index 53968cc27277..6438b5f8ab97 100644 --- a/vendor/google.golang.org/grpc/server_test.go +++ b/vendor/google.golang.org/grpc/server_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. 
+ * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -38,6 +23,8 @@ import ( "reflect" "strings" "testing" + + "google.golang.org/grpc/test/leakcheck" ) type emptyServiceServer interface{} @@ -45,6 +32,7 @@ type emptyServiceServer interface{} type testServer struct{} func TestStopBeforeServe(t *testing.T) { + defer leakcheck.Check(t) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to create listener: %v", err) @@ -66,6 +54,7 @@ func TestStopBeforeServe(t *testing.T) { } func TestGetServiceInfo(t *testing.T) { + defer leakcheck.Check(t) testSd := ServiceDesc{ ServiceName: "grpc.testing.EmptyService", HandlerType: (*emptyServiceServer)(nil), diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go index b24dcd8d343d..c0c14a24bb40 100644 --- a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - test.proto + grpc_testing/test.proto It has these top-level messages: SimpleRequest @@ -34,7 +33,6 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Unary request. type SimpleRequest struct { Id int32 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` } @@ -44,7 +42,13 @@ func (m *SimpleRequest) String() string { return proto.CompactTextStr func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -// Unary response, as configured by the request. +func (m *SimpleRequest) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + type SimpleResponse struct { Id int32 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` } @@ -54,6 +58,13 @@ func (m *SimpleResponse) String() string { return proto.CompactTextSt func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *SimpleResponse) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + func init() { proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") @@ -77,6 +88,10 @@ type TestServiceClient interface { // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) + // Client stream + ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) + // Server stream + ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) } type testServiceClient struct { @@ -127,6 +142,72 @@ func (x *testServiceFullDuplexCallClient) Recv() (*SimpleResponse, error) { return m, nil } +func (c *testServiceClient) ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/ClientStreamCall", opts...) + if err != nil { + return nil, err + } + x := &testServiceClientStreamCallClient{stream} + return x, nil +} + +type TestService_ClientStreamCallClient interface { + Send(*SimpleRequest) error + CloseAndRecv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceClientStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceClientStreamCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallClient) CloseAndRecv() (*SimpleResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testServiceClient) ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/ServerStreamCall", opts...) 
+ if err != nil { + return nil, err + } + x := &testServiceServerStreamCallClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type TestService_ServerStreamCallClient interface { + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testServiceServerStreamCallClient struct { + grpc.ClientStream +} + +func (x *testServiceServerStreamCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // Server API for TestService service type TestServiceServer interface { @@ -137,6 +218,10 @@ type TestServiceServer interface { // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error + // Client stream + ClientStreamCall(TestService_ClientStreamCallServer) error + // Server stream + ServerStreamCall(*SimpleRequest, TestService_ServerStreamCallServer) error } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { @@ -187,6 +272,53 @@ func (x *testServiceFullDuplexCallServer) Recv() (*SimpleRequest, error) { return m, nil } +func _TestService_ClientStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServiceServer).ClientStreamCall(&testServiceClientStreamCallServer{stream}) +} + +type TestService_ClientStreamCallServer interface { + SendAndClose(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type testServiceClientStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceClientStreamCallServer) SendAndClose(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testServiceClientStreamCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TestService_ServerStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SimpleRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServiceServer).ServerStreamCall(m, &testServiceServerStreamCallServer{stream}) +} + +type TestService_ServerStreamCallServer interface { + Send(*SimpleResponse) error + grpc.ServerStream +} + +type testServiceServerStreamCallServer struct { + grpc.ServerStream +} + +func (x *testServiceServerStreamCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), @@ -203,23 +335,35 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, + { + StreamName: "ClientStreamCall", + Handler: _TestService_ClientStreamCall_Handler, + ClientStreams: true, + }, + { + StreamName: "ServerStreamCall", + Handler: _TestService_ServerStreamCall_Handler, + ServerStreams: true, + }, }, - Metadata: "test.proto", + Metadata: "grpc_testing/test.proto", } -func init() { proto.RegisterFile("test.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 167 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 
0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x03, 0x09, 0x64, - 0xe6, 0xa5, 0x2b, 0xc9, 0x73, 0xf1, 0x06, 0x67, 0xe6, 0x16, 0xe4, 0xa4, 0x06, 0xa5, 0x16, 0x96, - 0xa6, 0x16, 0x97, 0x08, 0xf1, 0x71, 0x31, 0x65, 0xa6, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x06, - 0x31, 0x65, 0xa6, 0x28, 0x29, 0x70, 0xf1, 0xc1, 0x14, 0x14, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x42, - 0x55, 0x30, 0xc3, 0x54, 0x18, 0x2d, 0x63, 0xe4, 0xe2, 0x0e, 0x49, 0x2d, 0x2e, 0x09, 0x4e, 0x2d, - 0x2a, 0xcb, 0x4c, 0x4e, 0x15, 0x72, 0xe3, 0xe2, 0x0c, 0xcd, 0x4b, 0x2c, 0xaa, 0x74, 0x4e, 0xcc, - 0xc9, 0x11, 0x92, 0xd6, 0x43, 0xb6, 0x4e, 0x0f, 0xc5, 0x2e, 0x29, 0x19, 0xec, 0x92, 0x50, 0x7b, - 0xfc, 0xb9, 0xf8, 0xdc, 0x4a, 0x73, 0x72, 0x5c, 0x4a, 0x0b, 0x72, 0x52, 0x2b, 0x28, 0x34, 0x4c, - 0x83, 0xd1, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x00, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8d, - 0x82, 0x5b, 0xdd, 0x0e, 0x01, 0x00, 0x00, + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0xd7, 0x07, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x3c, 0x20, 0x09, 0x3d, 0xa8, 0x84, 0x92, 0x3c, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, + 0x4e, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, + 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x53, 0x66, 0x8a, 0x92, 0x02, 0x17, 0x1f, 0x4c, 0x41, 0x71, + 0x41, 0x7e, 0x5e, 0x71, 0x2a, 0x54, 0x05, 0x33, 0x4c, 0x85, 0xd1, 0x09, 0x26, 0x2e, 0xee, 0x90, + 0xd4, 0xe2, 0x92, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x37, 0x2e, 0xce, 0xd0, 0xbc, + 0xc4, 0xa2, 0x4a, 0xe7, 0xc4, 0x9c, 0x1c, 0x21, 0x69, 0x3d, 0x64, 0xeb, 0xf4, 0x50, 0xec, 0x92, + 0x92, 0xc1, 0x2e, 0x09, 0xb5, 0xc7, 0x9f, 0x8b, 0xcf, 0xad, 0x34, 0x27, 0xc7, 0xa5, 0xb4, 0x20, + 0x27, 0xb5, 0x82, 0x42, 0xc3, 0x34, 0x18, 0x0d, 0x18, 0x85, 0xfc, 0xb9, 0x04, 0x9c, 0x73, 0x32, + 0x53, 0xf3, 0x4a, 0x82, 0x4b, 0x8a, 0x52, 0x13, 0x73, 0x29, 0x36, 0x12, 0x64, 0x20, 0xc8, 0xd3, + 0xa9, 0x45, 0x54, 0x31, 0xd0, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x45, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x4c, 0x43, 0x27, 0x67, 0xbd, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto b/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto index 54e6f744f175..b49a0d5a7c75 100644 --- a/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto +++ b/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; package grpc.testing; @@ -20,4 +34,10 @@ service TestService { // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. 
rpc FullDuplexCall(stream SimpleRequest) returns (stream SimpleResponse); + + // Client stream + rpc ClientStreamCall(stream SimpleRequest) returns (SimpleResponse); + + // Server stream + rpc ServerStreamCall(SimpleRequest) returns (stream SimpleResponse); } diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go index 26e1a8e2f081..05b384c69318 100644 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -45,19 +30,22 @@ type ConnTagInfo struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // TODO add QOS related fields. } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. - // The returned context is used in the rest lifetime of the RPC. 
+ // The context used for the rest lifetime of the RPC will be derived from + // the returned context. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. HandleRPC(context.Context, RPCStats) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 43d6f0054dc4..d5aa2f793bf3 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -1,36 +1,23 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -39,6 +26,8 @@ package stats // import "google.golang.org/grpc/stats" import ( "net" "time" + + "golang.org/x/net/context" ) // RPCStats contains stats information about RPCs. @@ -49,7 +38,7 @@ type RPCStats interface { } // Begin contains stats when an RPC begins. -// FailFast are only valid if Client is true. +// FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool @@ -59,7 +48,7 @@ type Begin struct { FailFast bool } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. 
func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} @@ -80,19 +69,19 @@ type InPayload struct { RecvTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. -// FullMethod, addresses and Compression are only valid if Client is false. type InHeader struct { // Client is true if this InHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int + // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -103,7 +92,7 @@ type InHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} @@ -116,7 +105,7 @@ type InTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if the stats information is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} @@ -137,19 +126,17 @@ type OutPayload struct { SentTime time.Time } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. -// FullMethod, addresses and Compression are only valid if Client is true. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool - // WireLength is the wire length of header. - WireLength int + // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. @@ -160,7 +147,7 @@ type OutHeader struct { Compression string } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} @@ -173,7 +160,7 @@ type OutTrailer struct { WireLength int } -// IsClient indicates if this is from client side. +// IsClient indicates if this stats information is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} @@ -184,7 +171,9 @@ type End struct { Client bool // EndTime is the time when the RPC ends. EndTime time.Time - // Error is the error just happened. It implements status.Status if non-nil. + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. Error error } @@ -221,3 +210,85 @@ type ConnEnd struct { func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. 
+// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/stats/stats_test.go b/vendor/google.golang.org/grpc/stats/stats_test.go index c770c151b348..141324c0d0ed 100644 --- a/vendor/google.golang.org/grpc/stats/stats_test.go +++ b/vendor/google.golang.org/grpc/stats/stats_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -120,6 +105,51 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ } } +func (s *testServer) ClientStreamCall(stream testpb.TestService_ClientStreamCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(testTrailerMetadata) + } + for { + in, err := stream.Recv() + if err == io.EOF { + // read done. + return stream.SendAndClose(&testpb.SimpleResponse{Id: int32(0)}) + } + if err != nil { + return err + } + + if in.Id == errorID { + return fmt.Errorf("got error id: %v", in.Id) + } + } +} + +func (s *testServer) ServerStreamCall(in *testpb.SimpleRequest, stream testpb.TestService_ServerStreamCallServer) error { + md, ok := metadata.FromIncomingContext(stream.Context()) + if ok { + if err := stream.SendHeader(md); err != nil { + return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) + } + stream.SetTrailer(testTrailerMetadata) + } + + if in.Id == errorID { + return fmt.Errorf("got error id: %v", in.Id) + } + + for i := 0; i < 5; i++ { + if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { + return err + } + } + return nil +} + // test is an end-to-end test. It should be created with the newTest // func, modified as needed, and then started with its startServer method. 
// It should be cleaned up with the tearDown method. @@ -185,14 +215,9 @@ func (te *test) startServer(ts testpb.TestServiceServer) { if te.testServer != nil { testpb.RegisterTestServiceServer(s, te.testServer) } - _, port, err := net.SplitHostPort(lis.Addr().String()) - if err != nil { - te.t.Fatalf("Failed to parse listener address: %v", err) - } - addr := "127.0.0.1:" + port go s.Serve(lis) - te.srvAddr = addr + te.srvAddr = lis.Addr().String() } func (te *test) clientConn() *grpc.ClientConn { @@ -218,12 +243,21 @@ func (te *test) clientConn() *grpc.ClientConn { return te.cc } +type rpcType int + +const ( + unaryRPC rpcType = iota + clientStreamRPC + serverStreamRPC + fullDuplexStreamRPC +) + type rpcConfig struct { count int // Number of requests and responses for streaming RPCs. success bool // Whether the RPC should succeed or return error. failfast bool - streaming bool // Whether the rpc should be a streaming RPC. - noLastRecv bool // Whether to call recv for io.EOF. When true, last recv won't be called. Only valid for streaming RPCs. + callType rpcType // Type of RPC. + noLastRecv bool // Whether to call recv for io.EOF. When true, last recv won't be called. Only valid for streaming RPCs. } func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) { @@ -289,6 +323,64 @@ func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]*testpb.SimpleRequest return reqs, resps, nil } +func (te *test) doClientStreamCall(c *rpcConfig) ([]*testpb.SimpleRequest, *testpb.SimpleResponse, error) { + var ( + reqs []*testpb.SimpleRequest + resp *testpb.SimpleResponse + err error + ) + tc := testpb.NewTestServiceClient(te.clientConn()) + stream, err := tc.ClientStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), grpc.FailFast(c.failfast)) + if err != nil { + return reqs, resp, err + } + var startID int32 + if !c.success { + startID = errorID + } + for i := 0; i < c.count; i++ { + req := &testpb.SimpleRequest{ + Id: int32(i) + startID, + } + reqs = append(reqs, req) + if err = stream.Send(req); err != nil { + return reqs, resp, err + } + } + resp, err = stream.CloseAndRecv() + return reqs, resp, err +} + +func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { + var ( + req *testpb.SimpleRequest + resps []*testpb.SimpleResponse + err error + ) + + tc := testpb.NewTestServiceClient(te.clientConn()) + + var startID int32 + if !c.success { + startID = errorID + } + req = &testpb.SimpleRequest{Id: startID} + stream, err := tc.ServerStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), req, grpc.FailFast(c.failfast)) + if err != nil { + return req, resps, err + } + for { + var resp *testpb.SimpleResponse + resp, err := stream.Recv() + if err == io.EOF { + return req, resps, nil + } else if err != nil { + return req, resps, err + } + resps = append(resps, resp) + } +} + type expectedData struct { method string serverAddr string @@ -352,10 +444,6 @@ func checkInHeader(t *testing.T, d *gotData, e *expectedData) { if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } - // TODO check real length, not just > 0. 
- if st.WireLength <= 0 { - t.Fatalf("st.Lenght = 0, want > 0") - } if !d.client { if st.FullMethod != e.method { t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) @@ -438,18 +526,13 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) { func checkInTrailer(t *testing.T, d *gotData, e *expectedData) { var ( ok bool - st *stats.InTrailer ) - if st, ok = d.s.(*stats.InTrailer); !ok { + if _, ok = d.s.(*stats.InTrailer); !ok { t.Fatalf("got %T, want InTrailer", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } - // TODO check real length, not just > 0. - if st.WireLength <= 0 { - t.Fatalf("st.Lenght = 0, want > 0") - } } func checkOutHeader(t *testing.T, d *gotData, e *expectedData) { @@ -463,10 +546,6 @@ func checkOutHeader(t *testing.T, d *gotData, e *expectedData) { if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } - // TODO check real length, not just > 0. - if st.WireLength <= 0 { - t.Fatalf("st.Lenght = 0, want > 0") - } if d.client { if st.FullMethod != e.method { t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) @@ -550,10 +629,6 @@ func checkOutTrailer(t *testing.T, d *gotData, e *expectedData) { if st.Client { t.Fatalf("st IsClient = true, want false") } - // TODO check real length, not just > 0. - if st.WireLength <= 0 { - t.Fatalf("st.Lenght = 0, want > 0") - } } func checkEnd(t *testing.T, d *gotData, e *expectedData) { @@ -672,16 +747,35 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f defer te.tearDown() var ( - reqs []*testpb.SimpleRequest - resps []*testpb.SimpleResponse - err error + reqs []*testpb.SimpleRequest + resps []*testpb.SimpleResponse + err error + method string + + req *testpb.SimpleRequest + resp *testpb.SimpleResponse + e error ) - if !cc.streaming { - req, resp, e := te.doUnaryCall(cc) + + switch cc.callType { + case unaryRPC: + method = "/grpc.testing.TestService/UnaryCall" + req, resp, e = te.doUnaryCall(cc) reqs = []*testpb.SimpleRequest{req} resps = []*testpb.SimpleResponse{resp} err = e - } else { + case clientStreamRPC: + method = "/grpc.testing.TestService/ClientStreamCall" + reqs, resp, e = te.doClientStreamCall(cc) + resps = []*testpb.SimpleResponse{resp} + err = e + case serverStreamRPC: + method = "/grpc.testing.TestService/ServerStreamCall" + req, resps, e = te.doServerStreamCall(cc) + reqs = []*testpb.SimpleRequest{req} + err = e + case fullDuplexStreamRPC: + method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) } if cc.success != (err == nil) { @@ -713,22 +807,20 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f expect := &expectedData{ serverAddr: te.srvAddr, compression: tc.compress, + method: method, requests: reqs, responses: resps, err: err, } - if !cc.streaming { - expect.method = "/grpc.testing.TestService/UnaryCall" - } else { - expect.method = "/grpc.testing.TestService/FullDuplexCall" - } + h.mu.Lock() checkConnStats(t, h.gotConn) + h.mu.Unlock() checkServerStats(t, h.gotRPC, expect, checkFuncs) } func TestServerStatsUnaryRPC(t *testing.T) { - testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: true}, []func(t *testing.T, d *gotData, e *expectedData){ + testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, @@ -740,7 +832,73 @@ func TestServerStatsUnaryRPC(t *testing.T) { } func TestServerStatsUnaryRPCError(t *testing.T) { - 
testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: false}, []func(t *testing.T, d *gotData, e *expectedData){ + testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsClientStreamRPC(t *testing.T) { + count := 5 + checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + } + ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInPayload, + } + for i := 0; i < count; i++ { + checkFuncs = append(checkFuncs, ioPayFuncs...) + } + checkFuncs = append(checkFuncs, + checkOutPayload, + checkOutTrailer, + checkEnd, + ) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: clientStreamRPC}, checkFuncs) +} + +func TestServerStatsClientStreamRPCError(t *testing.T) { + count := 1 + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: clientStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkOutHeader, + checkInPayload, + checkOutTrailer, + checkEnd, + }) +} + +func TestServerStatsServerStreamRPC(t *testing.T) { + count := 5 + checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkInHeader, + checkBegin, + checkInPayload, + checkOutHeader, + } + ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ + checkOutPayload, + } + for i := 0; i < count; i++ { + checkFuncs = append(checkFuncs, ioPayFuncs...) + } + checkFuncs = append(checkFuncs, + checkOutTrailer, + checkEnd, + ) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: serverStreamRPC}, checkFuncs) +} + +func TestServerStatsServerStreamRPCError(t *testing.T) { + count := 5 + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: serverStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, @@ -750,7 +908,7 @@ func TestServerStatsUnaryRPCError(t *testing.T) { }) } -func TestServerStatsStreamingRPC(t *testing.T) { +func TestServerStatsFullDuplexRPC(t *testing.T) { count := 5 checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, @@ -768,12 +926,12 @@ func TestServerStatsStreamingRPC(t *testing.T) { checkOutTrailer, checkEnd, ) - testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, streaming: true}, checkFuncs) + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}, checkFuncs) } -func TestServerStatsStreamingRPCError(t *testing.T) { +func TestServerStatsFullDuplexRPCError(t *testing.T) { count := 5 - testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, streaming: true}, []func(t *testing.T, d *gotData, e *expectedData){ + testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkOutHeader, @@ -800,13 +958,14 @@ func checkClientStats(t *testing.T, got []*gotData, expect *expectedData, checkF t.Fatalf("got %v stats, want %v stats", len(got), expectLen) } - var rpcctx context.Context + var tagInfoInCtx *stats.RPCTagInfo for i 
:= 0; i < len(got); i++ { if _, ok := got[i].s.(stats.RPCStats); ok { - if rpcctx != nil && got[i].ctx != rpcctx { - t.Fatalf("got different contexts with stats %T", got[i].s) + tagInfoInCtxNew, _ := got[i].ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo) + if tagInfoInCtx != nil && tagInfoInCtx != tagInfoInCtxNew { + t.Fatalf("got context containing different tagInfo with stats %T", got[i].s) } - rpcctx = got[i].ctx + tagInfoInCtx = tagInfoInCtxNew } } @@ -879,16 +1038,34 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map defer te.tearDown() var ( - reqs []*testpb.SimpleRequest - resps []*testpb.SimpleResponse - err error + reqs []*testpb.SimpleRequest + resps []*testpb.SimpleResponse + method string + err error + + req *testpb.SimpleRequest + resp *testpb.SimpleResponse + e error ) - if !cc.streaming { - req, resp, e := te.doUnaryCall(cc) + switch cc.callType { + case unaryRPC: + method = "/grpc.testing.TestService/UnaryCall" + req, resp, e = te.doUnaryCall(cc) reqs = []*testpb.SimpleRequest{req} resps = []*testpb.SimpleResponse{resp} err = e - } else { + case clientStreamRPC: + method = "/grpc.testing.TestService/ClientStreamCall" + reqs, resp, e = te.doClientStreamCall(cc) + resps = []*testpb.SimpleResponse{resp} + err = e + case serverStreamRPC: + method = "/grpc.testing.TestService/ServerStreamCall" + req, resps, e = te.doServerStreamCall(cc) + reqs = []*testpb.SimpleRequest{req} + err = e + case fullDuplexStreamRPC: + method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) } if cc.success != (err == nil) { @@ -924,23 +1101,21 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map expect := &expectedData{ serverAddr: te.srvAddr, compression: tc.compress, + method: method, requests: reqs, responses: resps, failfast: cc.failfast, err: err, } - if !cc.streaming { - expect.method = "/grpc.testing.TestService/UnaryCall" - } else { - expect.method = "/grpc.testing.TestService/FullDuplexCall" - } + h.mu.Lock() checkConnStats(t, h.gotConn) + h.mu.Unlock() checkClientStats(t, h.gotRPC, expect, checkFuncs) } func TestClientStatsUnaryRPC(t *testing.T) { - testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, failfast: false}, map[int]*checkFuncWithCount{ + testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, @@ -952,7 +1127,7 @@ func TestClientStatsUnaryRPC(t *testing.T) { } func TestClientStatsUnaryRPCError(t *testing.T) { - testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, failfast: false}, map[int]*checkFuncWithCount{ + testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, @@ -962,12 +1137,37 @@ func TestClientStatsUnaryRPCError(t *testing.T) { }) } -func TestClientStatsStreamingRPC(t *testing.T) { +func TestClientStatsClientStreamRPC(t *testing.T) { count := 5 - testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, streaming: true}, map[int]*checkFuncWithCount{ + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, 
outHeader: {checkOutHeader, 1}, + inHeader: {checkInHeader, 1}, outPayload: {checkOutPayload, count}, + inTrailer: {checkInTrailer, 1}, + inPayload: {checkInPayload, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsClientStreamRPCError(t *testing.T) { + count := 1 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + inHeader: {checkInHeader, 1}, + outPayload: {checkOutPayload, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsServerStreamRPC(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inPayload: {checkInPayload, count}, inTrailer: {checkInTrailer, 1}, @@ -975,10 +1175,21 @@ func TestClientStatsStreamingRPC(t *testing.T) { }) } -// If the user doesn't call the last recv() on clientSteam. -func TestClientStatsStreamingRPCNotCallingLastRecv(t *testing.T) { - count := 1 - testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, streaming: true, noLastRecv: true}, map[int]*checkFuncWithCount{ +func TestClientStatsServerStreamRPCError(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, 1}, + inHeader: {checkInHeader, 1}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestClientStatsFullDuplexRPC(t *testing.T) { + count := 5 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, count}, @@ -989,9 +1200,9 @@ func TestClientStatsStreamingRPCNotCallingLastRecv(t *testing.T) { }) } -func TestClientStatsStreamingRPCError(t *testing.T) { +func TestClientStatsFullDuplexRPCError(t *testing.T) { count := 5 - testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, streaming: true}, map[int]*checkFuncWithCount{ + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, @@ -1000,3 +1211,55 @@ func TestClientStatsStreamingRPCError(t *testing.T) { end: {checkEnd, 1}, }) } + +// If the user doesn't call the last recv() on clientStream. 
+func TestClientStatsFullDuplexRPCNotCallingLastRecv(t *testing.T) { + count := 1 + testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC, noLastRecv: true}, map[int]*checkFuncWithCount{ + begin: {checkBegin, 1}, + outHeader: {checkOutHeader, 1}, + outPayload: {checkOutPayload, count}, + inHeader: {checkInHeader, 1}, + inPayload: {checkInPayload, count}, + inTrailer: {checkInTrailer, 1}, + end: {checkEnd, 1}, + }) +} + +func TestTags(t *testing.T) { + b := []byte{5, 2, 4, 3, 1} + ctx := stats.SetTags(context.Background(), b) + if tg := stats.OutgoingTags(ctx); !reflect.DeepEqual(tg, b) { + t.Errorf("OutgoingTags(%v) = %v; want %v", ctx, tg, b) + } + if tg := stats.Tags(ctx); tg != nil { + t.Errorf("Tags(%v) = %v; want nil", ctx, tg) + } + + ctx = stats.SetIncomingTags(context.Background(), b) + if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, b) { + t.Errorf("Tags(%v) = %v; want %v", ctx, tg, b) + } + if tg := stats.OutgoingTags(ctx); tg != nil { + t.Errorf("OutgoingTags(%v) = %v; want nil", ctx, tg) + } +} + +func TestTrace(t *testing.T) { + b := []byte{5, 2, 4, 3, 1} + ctx := stats.SetTrace(context.Background(), b) + if tr := stats.OutgoingTrace(ctx); !reflect.DeepEqual(tr, b) { + t.Errorf("OutgoingTrace(%v) = %v; want %v", ctx, tr, b) + } + if tr := stats.Trace(ctx); tr != nil { + t.Errorf("Trace(%v) = %v; want nil", ctx, tr) + } + + ctx = stats.SetIncomingTrace(context.Background(), b) + if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, b) { + t.Errorf("Trace(%v) = %v; want %v", ctx, tr, b) + } + if tr := stats.OutgoingTrace(ctx); tr != nil { + t.Errorf("OutgoingTrace(%v) = %v; want nil", ctx, tr) + } +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 99a4cbe51129..871dc4b31c76 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -1,33 +1,18 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -43,9 +28,11 @@ package status import ( + "errors" "fmt" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) @@ -143,3 +130,39 @@ func FromError(err error) (s *Status, ok bool) { } return nil, false } + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} diff --git a/vendor/google.golang.org/grpc/status/status_test.go b/vendor/google.golang.org/grpc/status/status_test.go index 9a4f39a9f1da..69be8c9f6a36 100644 --- a/vendor/google.golang.org/grpc/status/status_test.go +++ b/vendor/google.golang.org/grpc/status/status_test.go @@ -1,44 +1,35 @@ /* * - * Copyright 2017, Google Inc. - * All rights reserved. + * Copyright 2017 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ package status import ( + "errors" + "fmt" "reflect" "testing" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" apb "github.com/golang/protobuf/ptypes/any" + dpb "github.com/golang/protobuf/ptypes/duration" + cpb "google.golang.org/genproto/googleapis/rpc/code" + epb "google.golang.org/genproto/googleapis/rpc/errdetails" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) @@ -127,3 +118,144 @@ func TestFromErrorOK(t *testing.T) { t.Fatalf("FromError(nil) = %v, %v; want , true", s, ok, code, message) } } + +func TestStatus_ErrorDetails(t *testing.T) { + tests := []struct { + code codes.Code + details []proto.Message + }{ + { + code: codes.NotFound, + details: nil, + }, + { + code: codes.NotFound, + details: []proto.Message{ + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + { + code: codes.Internal, + details: []proto.Message{ + &epb.DebugInfo{ + StackEntries: []string{ + "first stack", + "second stack", + }, + }, + }, + }, + { + code: codes.Unavailable, + details: []proto.Message{ + &epb.RetryInfo{ + RetryDelay: &dpb.Duration{Seconds: 60}, + }, + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + } + + for _, tc := range tests { + s, err := New(tc.code, "").WithDetails(tc.details...) 
+ if err != nil { + t.Fatalf("(%v).WithDetails(%+v) failed: %v", str(s), tc.details, err) + } + details := s.Details() + for i := range details { + if !proto.Equal(details[i].(proto.Message), tc.details[i]) { + t.Fatalf("(%v).Details()[%d] = %+v, want %+v", str(s), i, details[i], tc.details[i]) + } + } + } +} + +func TestStatus_WithDetails_Fail(t *testing.T) { + tests := []*Status{ + nil, + FromProto(nil), + New(codes.OK, ""), + } + for _, s := range tests { + if s, err := s.WithDetails(); err == nil || s != nil { + t.Fatalf("(%v).WithDetails(%+v) = %v, %v; want nil, non-nil", str(s), []proto.Message{}, s, err) + } + } +} + +func TestStatus_ErrorDetails_Fail(t *testing.T) { + tests := []struct { + s *Status + i []interface{} + }{ + { + nil, + nil, + }, + { + FromProto(nil), + nil, + }, + { + New(codes.OK, ""), + []interface{}{}, + }, + { + FromProto(&spb.Status{ + Code: int32(cpb.Code_CANCELLED), + Details: []*apb.Any{ + { + TypeUrl: "", + Value: []byte{}, + }, + mustMarshalAny(&epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }), + }, + }), + []interface{}{ + errors.New(`message type url "" is invalid`), + &epb.ResourceInfo{ + ResourceType: "book", + ResourceName: "projects/1234/books/5678", + Owner: "User", + }, + }, + }, + } + for _, tc := range tests { + got := tc.s.Details() + if !reflect.DeepEqual(got, tc.i) { + t.Errorf("(%v).Details() = %+v, want %+v", str(tc.s), got, tc.i) + } + } +} + +func str(s *Status) string { + if s == nil { + return "nil" + } + if s.s == nil { + return "" + } + return fmt.Sprintf("", codes.Code(s.s.GetCode()), s.s.GetMessage(), s.s.GetDetails()) +} + +// mustMarshalAny converts a protobuf message to an any. +func mustMarshalAny(msg proto.Message) *apb.Any { + any, err := ptypes.MarshalAny(msg) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", msg, err)) + } + return any +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 33f1c787b346..75eab40b1095 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
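Editor's note: the status tests above exercise the new WithDetails/Details helpers added to the status package in this patch. A minimal usage sketch from a caller's point of view follows; it is not part of the patch, the handler and resource names are assumptions, and epb stands for the genproto errdetails package imported by the test.

// Hedged sketch, not part of the patch: attaching typed error details on the
// server and reading them back on the client with the new API.
func lookupBook(name string) error {
	st := status.New(codes.NotFound, "book not found")
	withInfo, err := st.WithDetails(&epb.ResourceInfo{
		ResourceType: "book",
		ResourceName: name,
	})
	if err != nil {
		// WithDetails refuses codes.OK and marshal failures; fall back to the plain status.
		return st.Err()
	}
	return withInfo.Err()
}

func describe(err error) {
	if s, ok := status.FromError(err); ok {
		for _, d := range s.Details() {
			if info, ok := d.(*epb.ResourceInfo); ok {
				fmt.Printf("missing %s %q\n", info.ResourceType, info.ResourceName)
			}
		}
	}
}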
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -42,8 +27,10 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/transport" @@ -73,11 +60,17 @@ type Stream interface { // side. On server side, it simply returns the error to the caller. // SendMsg is called by generated code. Also Users can call SendMsg // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On // any other error, it aborts the stream and returns an RPC status. On // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } @@ -93,6 +86,11 @@ type ClientStream interface { // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. + // Always call Stream.RecvMsg() to get the final status if you care about the status of + // the RPC. Stream } @@ -109,29 +107,48 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var ( t transport.ClientTransport s *transport.Stream - put func() + done func(balancer.DoneInfo) cancel context.CancelFunc ) - c := defaultCallInfo - if mc, ok := cc.getMethodConfig(method); ok { - c.failFast = !mc.WaitForReady - if mc.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, mc.Timeout) - } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer func() { + if err != nil { + cancel() + } + }() + } + + opts = append(cc.dopts.callOptions, opts...) 
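Editor's note: newClientStream now merges the connection's default call options with the per-call ones and derives failFast, timeout, credentials and the message-size limits from them and from the method config. A hedged sketch of the call options that feed those callInfo fields follows; the client stub, method and myCreds are hypothetical and not part of the patch.

// Hedged sketch, not part of the patch: per-call options that populate the
// callInfo fields handled above.
stream, err := client.FullDuplexCall(ctx,
	grpc.FailFast(false),            // callInfo.failFast, also reported to stats handlers
	grpc.MaxCallSendMsgSize(1<<20),  // checked before every SendMsg
	grpc.MaxCallRecvMsgSize(4<<20),  // enforced in RecvMsg
	grpc.PerRPCCredentials(myCreds), // ends up in callHdr.Creds
)
if err != nil {
	return err
}
_ = stream // SendMsg and RecvMsg may then run in separate goroutines, as documented above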
for _, o := range opts { - if err := o.before(&c); err != nil { + if err := o.before(c); err != nil { return nil, toRPCErr(err) } } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. + Flush: desc.ClientStreams, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + if c.creds != nil { + callHdr.Creds = c.creds + } var trInfo traceInfo if EnableTracing { trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) @@ -151,32 +168,29 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - ctx = newContextWithRPCInfo(ctx) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method}) + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) begin := &stats.Begin{ Client: true, BeginTime: time.Now(), FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - } - defer func() { - if err != nil && sh != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) } - sh.HandleRPC(ctx, end) - } - }() - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, + }() } for { - t, put, err = cc.getTransport(ctx, gopts) + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -194,15 +208,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && put != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - if put != nil { - put() - put = nil + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -211,20 +225,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } break } + // Set callInfo.peer object from stream's context. 
+ if peer, ok := peer.FromContext(s.Context()); ok { + c.peer = peer + } cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - maxMsgSize: cc.dopts.maxMsgSize, - cancel: cancel, - - put: put, - t: t, - s: s, - p: &parser{r: s}, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + cancel: cancel, + + done: done, + t: t, + s: s, + p: &parser{r: s}, tracing: EnableTracing, trInfo: trInfo, @@ -232,9 +249,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth statsCtx: ctx, statsHandler: cc.dopts.copts.StatsHandler, } - if cc.dopts.cp != nil { - cs.cbuf = new(bytes.Buffer) - } // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination // when there is no pending I/O operations on this stream. go func() { @@ -263,23 +277,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // clientStream implements a client side Stream. type clientStream struct { - opts []CallOption - c callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - maxMsgSize int - cancel context.CancelFunc + opts []CallOption + c *callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + dc Decompressor + cancel context.CancelFunc tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex - put func() + done func(balancer.DoneInfo) closed bool finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), @@ -329,7 +341,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return } if err == io.EOF { - // Specialize the process for server streaming. SendMesg is only called + // Specialize the process for server streaming. SendMsg is only called // once when creating the stream object. io.EOF needs to be skipped when // the rpc is early finished (before the stream object is created.). // TODO: It is probably better to move this into the generated code. @@ -349,16 +361,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Client: true, } } - out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() + hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - return Errorf(codes.Internal, "grpc: %v", err) + return err + } + if cs.c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *cs.c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), *cs.c.maxSendMessageSize) } - err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) + err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false}) if err == nil && outPayload != nil { outPayload.SentTime = time.Now() cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) @@ -373,7 +386,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { Client: true, } } - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, inPayload) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -396,7 +412,10 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } // Special handling for client streaming rpc. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, cs.maxMsgSize, nil) + if cs.c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) @@ -424,7 +443,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { } func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) defer func() { if err != nil { cs.finish(err) @@ -464,15 +483,15 @@ func (cs *clientStream) finish(err error) { } }() for _, o := range cs.opts { - o.after(&cs.c) + o.after(cs.c) } - if cs.put != nil { + if cs.done != nil { updateRPCInfoInContext(cs.s.Context(), rpcInfo{ bytesSent: cs.s.BytesSent(), bytesReceived: cs.s.BytesReceived(), }) - cs.put() - cs.put = nil + cs.done(balancer.DoneInfo{Err: err}) + cs.done = nil } if cs.statsHandler != nil { end := &stats.End{ @@ -521,15 +540,15 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - trInfo *traceInfo + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo statsHandler stats.Handler @@ -573,22 +592,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var outPayload *stats.OutPayload if ss.statsHandler != nil { outPayload = &stats.OutPayload{} } - out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() + hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) if err != nil { - err = Errorf(codes.Internal, "grpc: %v", err) return err } - if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { + if len(data) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } if outPayload != nil { @@ -612,12 +632,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } ss.mu.Unlock() } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } }() var inPayload *stats.InPayload if ss.statsHandler != nil { inPayload = &stats.InPayload{} } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { if err == io.EOF { return err } diff --git a/vendor/google.golang.org/grpc/stress/client/main.go b/vendor/google.golang.org/grpc/stress/client/main.go index dff85ffec29c..635f1ad38373 100644 --- a/vendor/google.golang.org/grpc/stress/client/main.go +++ b/vendor/google.golang.org/grpc/stress/client/main.go @@ -1,36 +1,23 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ +//go:generate protoc -I ../grpc_testing --go_out=plugins=grpc:../grpc_testing ../grpc_testing/metrics.proto + // client starts an interop client to do stress test and a metrics server to report qps. 
package main @@ -52,6 +39,7 @@ import ( "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" metricspb "google.golang.org/grpc/stress/grpc_testing" + "google.golang.org/grpc/testdata" ) var ( @@ -64,9 +52,7 @@ var ( useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") tlsServerName = flag.String("server_host_override", "foo.test.google.fr", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") - - // The test CA root cert file - testCAFile = "testdata/ca.pem" + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") ) // testCaseWithWeight contains the test case type and its weight. @@ -292,7 +278,10 @@ func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.C var creds credentials.TransportCredentials if testCA { var err error - creds, err = credentials.NewClientTLSFromFile(testCAFile, sn) + if *caFile == "" { + *caFile = testdata.Path("ca.pem") + } + creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } diff --git a/vendor/google.golang.org/grpc/stress/client/testdata/ca.pem b/vendor/google.golang.org/grpc/stress/client/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/stress/client/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/stress/client/testdata/server1.key b/vendor/google.golang.org/grpc/stress/client/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/stress/client/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 
-ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/stress/client/testdata/server1.pem b/vendor/google.golang.org/grpc/stress/client/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/stress/client/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go index a1310b5f585e..466668a4df1e 100644 --- a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -// DO NOT EDIT! /* Package grpc_testing is a generated protocol buffer package. @@ -35,7 +34,7 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Reponse message containing the gauge name and value +// Response message containing the gauge name and value type GaugeResponse struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Types that are valid to be assigned to Value: @@ -75,6 +74,13 @@ func (m *GaugeResponse) GetValue() isGaugeResponse_Value { return nil } +func (m *GaugeResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *GaugeResponse) GetLongValue() int64 { if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { return x.LongValue @@ -185,6 +191,13 @@ func (m *GaugeRequest) String() string { return proto.CompactTextStri func (*GaugeRequest) ProtoMessage() {} func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *GaugeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type EmptyMessage struct { } @@ -341,21 +354,21 @@ var _MetricsService_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("metrics.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 253 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4d, 0x2d, 0x29, - 0xca, 0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, - 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0x9a, 0xce, 0xc8, 0xc5, 0xeb, 0x9e, 0x58, 0x9a, - 0x9e, 0x1a, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x24, 0xc4, 0xc5, 0x92, 0x97, 0x98, - 0x9b, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0xc9, 0x73, 0x71, 0xe5, 0xe4, - 0xe7, 0xa5, 0xc7, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x01, 0x65, 0x98, 0x3d, 0x18, 0x82, - 0x38, 0x41, 0x62, 0x61, 0x20, 0x21, 0x21, 0x65, 0x2e, 0x9e, 0x94, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, - 0xa8, 0x12, 0x66, 0xa0, 0x12, 0x46, 0xa0, 0x12, 0x6e, 0x88, 0x28, 0x5c, 0x51, 0x31, 0xd0, 0x25, - 0x70, 0x73, 0x58, 0x40, 0x36, 0x80, 0x14, 0x41, 0x44, 0xc1, 0x8a, 0x9c, 0xd8, 0xb9, 0x58, 0xc1, - 0xb2, 0x4a, 0x4a, 0x5c, 0x3c, 0x50, 0x87, 0x15, 0x96, 0x02, 0x1d, 0x8b, 0xcd, 0x5d, 0x4a, 0x7c, - 0x5c, 0x3c, 0xae, 0xb9, 0x05, 0x25, 0x95, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x46, 0x0b, - 0x18, 0xb9, 0xf8, 0x7c, 0x21, 0xbe, 0x0d, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x15, 0xf2, 0x04, - 0x1a, 0x93, 0x5a, 0xe2, 0x98, 0x93, 0x03, 0x36, 0xac, 0x58, 0x48, 0x4a, 0x0f, 0xd9, 0xff, 0x7a, - 0xc8, 0xda, 0xa5, 0xa4, 0x51, 0xe5, 0x50, 0xc2, 0xc5, 0x80, 0x51, 0xc8, 0x99, 0x8b, 0x03, 0x68, - 0x14, 0x58, 0x14, 0xdd, 0x18, 0x64, 0x97, 0xe2, 0x35, 0x26, 0x89, 0x0d, 0x1c, 0x0b, 0xc6, 0x80, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4f, 0xc3, 0x30, + 0x10, 0xc5, 0x6b, 0x5a, 0xfe, 0xf4, 0x70, 0x3b, 0x78, 0xaa, 0xca, 0x40, 0x14, 0x96, 0x4c, 0x11, + 0x82, 0x4f, 0x00, 0x08, 0xa5, 0x0c, 0x5d, 0x82, 0xc4, 0x8a, 0xd2, 0x70, 0xb2, 0x22, 0x39, 0x71, + 0xf0, 0x5d, 0x2a, 0xf1, 0x49, 0x58, 0xf9, 0xa8, 0xc8, 0x4e, 0x55, 0xa5, 0x08, 0x75, 0xb3, 0x7e, + 0xf7, 0xfc, 0xfc, 0x9e, 0x0f, 0x66, 0x35, 0xb2, 0xab, 0x4a, 0x4a, 0x5b, 0x67, 0xd9, 0x2a, 0xa9, + 0x5d, 0x5b, 0xa6, 0x8c, 0xc4, 0x55, 0xa3, 0xe3, 0x6f, 0x01, 0xb3, 0xac, 0xe8, 0x34, 0xe6, 0x48, + 0xad, 0x6d, 0x08, 0x95, 0x82, 0x49, 0x53, 0xd4, 0xb8, 0x10, 0x91, 0x48, 0xa6, 0x79, 0x38, 0xab, + 0x6b, 0x00, 0x63, 0x1b, 0xfd, 0xbe, 0x2d, 0x4c, 0x87, 0x8b, 
0x93, 0x48, 0x24, 0xe3, 0xd5, 0x28, + 0x9f, 0x7a, 0xf6, 0xe6, 0x91, 0xba, 0x01, 0xf9, 0x61, 0xbb, 0x8d, 0xc1, 0x9d, 0x64, 0x1c, 0x89, + 0x44, 0xac, 0x46, 0xf9, 0x65, 0x4f, 0xf7, 0x22, 0x62, 0x57, 0xed, 0x7d, 0x26, 0xfe, 0x05, 0x2f, + 0xea, 0x69, 0x10, 0x3d, 0x9e, 0xc3, 0x69, 0x98, 0xc6, 0x31, 0xc8, 0x5d, 0xb0, 0xcf, 0x0e, 0x89, + 0xff, 0xcb, 0x15, 0xcf, 0x41, 0x3e, 0xd7, 0x2d, 0x7f, 0xad, 0x91, 0xa8, 0xd0, 0x78, 0xf7, 0x23, + 0x60, 0xbe, 0xee, 0xdb, 0xbe, 0xa2, 0xdb, 0x56, 0x25, 0xaa, 0x17, 0x90, 0x19, 0xf2, 0x83, 0x31, + 0xc1, 0x8c, 0xd4, 0x32, 0x1d, 0xf6, 0x4f, 0x87, 0xd7, 0x97, 0x57, 0x87, 0xb3, 0x83, 0x7f, 0xb9, + 0x15, 0xea, 0x09, 0x2e, 0x32, 0xe4, 0x40, 0xff, 0xda, 0x0c, 0x93, 0x1e, 0xb5, 0xd9, 0x9c, 0x85, + 0x2d, 0xdc, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto index 1202b20b8fd0..695040064317 100644 --- a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto @@ -1,31 +1,16 @@ -// Copyright 2015-2016, Google Inc. -// All rights reserved. +// Copyright 2015-2016 gRPC authors. // -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Contains the definitions for a metrics service and the type of metrics // exposed by the service. 
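Editor's note: the regenerated metrics.pb.go above adds explicit getters, and the comment fix clarifies that GaugeResponse carries the gauge name plus a oneof value. A hedged sketch of populating and reading that oneof follows, using the wrapper types visible in the generated code; metricspb is the import alias the stress client uses for stress/grpc_testing, and the values are made up.

// Hedged sketch, not part of the patch: filling and switching on the
// GaugeResponse oneof from the generated code above.
resp := &metricspb.GaugeResponse{
	Name:  "qps",
	Value: &metricspb.GaugeResponse_LongValue{LongValue: 1234},
}
switch v := resp.GetValue().(type) {
case *metricspb.GaugeResponse_LongValue:
	fmt.Println(resp.GetName(), v.LongValue)
case *metricspb.GaugeResponse_DoubleValue:
	fmt.Println(resp.GetName(), v.DoubleValue)
case *metricspb.GaugeResponse_StringValue:
	fmt.Println(resp.GetName(), v.StringValue)
}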
@@ -37,7 +22,7 @@ syntax = "proto3"; package grpc.testing; -// Reponse message containing the gauge name and value +// Response message containing the gauge name and value message GaugeResponse { string name = 1; oneof value { diff --git a/vendor/google.golang.org/grpc/stress/metrics_client/main.go b/vendor/google.golang.org/grpc/stress/metrics_client/main.go index 983a8ff24cce..6405ec85ebc1 100644 --- a/vendor/google.golang.org/grpc/stress/metrics_client/main.go +++ b/vendor/google.golang.org/grpc/stress/metrics_client/main.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index 0f3664767423..22b8fb50dea5 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -1,33 +1,18 @@ /* * - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -47,8 +32,20 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs when a new stream is created -// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead -// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming -// work in this handle. Otherwise all the RPCs would slow down. +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go new file mode 100644 index 000000000000..bc0ab839fc4d --- /dev/null +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -0,0 +1,229 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
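Editor's note: the reworked ServerInHandle comment above spells out that the handle runs per connection, before the stream exists, and that a non-nil error refuses the stream with RST_STREAM. A hedged sketch of registering such a handle for coarse admission control follows; it is not part of the patch and the method name is made up.

// Hedged sketch, not part of the patch: refusing selected streams in a tap
// handle, installed with grpc.InTapHandle.
func refuseHeavy(ctx context.Context, info *tap.Info) (context.Context, error) {
	if info.FullMethodName == "/hypothetical.Bulk/Export" {
		// The error text is not sent to the client; it only refuses the stream.
		return nil, fmt.Errorf("refusing %s", info.FullMethodName)
	}
	return ctx, nil
}

// Installed as: grpc.NewServer(grpc.InTapHandle(refuseHeavy))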
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bufconn provides a net.Conn implemented by a buffer and related +// dialing and listening functionality. +package bufconn + +import ( + "fmt" + "io" + "net" + "sync" + "time" +) + +// Listener implements a net.Listener that creates local, buffered net.Conns +// via its Accept and Dial method. +type Listener struct { + mu sync.Mutex + sz int + ch chan net.Conn + done chan struct{} +} + +var errClosed = fmt.Errorf("Closed") + +// Listen returns a Listener that can only be contacted by its own Dialers and +// creates buffered connections between the two. +func Listen(sz int) *Listener { + return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} +} + +// Accept blocks until Dial is called, then returns a net.Conn for the server +// half of the connection. +func (l *Listener) Accept() (net.Conn, error) { + select { + case <-l.done: + return nil, errClosed + case c := <-l.ch: + return c, nil + } +} + +// Close stops the listener. +func (l *Listener) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + select { + case <-l.done: + // Already closed. + break + default: + close(l.done) + } + return nil +} + +// Addr reports the address of the listener. +func (l *Listener) Addr() net.Addr { return addr{} } + +// Dial creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. +func (l *Listener) Dial() (net.Conn, error) { + p1, p2 := newPipe(l.sz), newPipe(l.sz) + select { + case <-l.done: + return nil, errClosed + case l.ch <- &conn{p1, p2}: + return &conn{p2, p1}, nil + } +} + +type pipe struct { + mu sync.Mutex + + // buf contains the data in the pipe. It is a ring buffer of fixed capacity, + // with r and w pointing to the offset to read and write, respsectively. + // + // Data is read between [r, w) and written to [w, r), wrapping around the end + // of the slice if necessary. + // + // The buffer is empty if r == len(buf), otherwise if r == w, it is full. + // + // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. + buf []byte + w, r int + + wwait sync.Cond + rwait sync.Cond + closed bool +} + +func newPipe(sz int) *pipe { + p := &pipe{buf: make([]byte, 0, sz)} + p.wwait.L = &p.mu + p.rwait.L = &p.mu + return p +} + +func (p *pipe) empty() bool { + return p.r == len(p.buf) +} + +func (p *pipe) full() bool { + return p.r < len(p.buf) && p.r == p.w +} + +func (p *pipe) Read(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + // Block until p has data. + for { + if p.closed { + return 0, io.ErrClosedPipe + } + if !p.empty() { + break + } + p.rwait.Wait() + } + wasFull := p.full() + + n = copy(b, p.buf[p.r:len(p.buf)]) + p.r += n + if p.r == cap(p.buf) { + p.r = 0 + p.buf = p.buf[:p.w] + } + + // Signal a blocked writer, if any + if wasFull { + p.wwait.Signal() + } + + return n, nil +} + +func (p *pipe) Write(b []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.closed { + return 0, io.ErrClosedPipe + } + for len(b) > 0 { + // Block until p is not full. 
+ for { + if p.closed { + return 0, io.ErrClosedPipe + } + if !p.full() { + break + } + p.wwait.Wait() + } + wasEmpty := p.empty() + + end := cap(p.buf) + if p.w < p.r { + end = p.r + } + x := copy(p.buf[p.w:end], b) + b = b[x:] + n += x + p.w += x + if p.w > len(p.buf) { + p.buf = p.buf[:p.w] + } + if p.w == cap(p.buf) { + p.w = 0 + } + + // Signal a blocked reader, if any. + if wasEmpty { + p.rwait.Signal() + } + } + return n, nil +} + +func (p *pipe) Close() error { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + // Signal all blocked readers and writers to return an error. + p.rwait.Broadcast() + p.wwait.Broadcast() + return nil +} + +type conn struct { + io.ReadCloser + io.WriteCloser +} + +func (c *conn) Close() error { + err1 := c.ReadCloser.Close() + err2 := c.WriteCloser.Close() + if err1 != nil { + return err1 + } + return err2 +} + +func (*conn) LocalAddr() net.Addr { return addr{} } +func (*conn) RemoteAddr() net.Addr { return addr{} } +func (c *conn) SetDeadline(t time.Time) error { return fmt.Errorf("unsupported") } +func (c *conn) SetReadDeadline(t time.Time) error { return fmt.Errorf("unsupported") } +func (c *conn) SetWriteDeadline(t time.Time) error { return fmt.Errorf("unsupported") } + +type addr struct{} + +func (addr) Network() string { return "bufconn" } +func (addr) String() string { return "bufconn" } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go new file mode 100644 index 000000000000..0f7bc2227971 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn_test.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
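Editor's note: the new bufconn package above gives tests an in-memory net.Conn with no real network involved. A hedged sketch of serving gRPC over it follows; it is not part of the patch, service registration is omitted, and the target string is arbitrary because the dialer ignores it.

// Hedged sketch, not part of the patch: running a gRPC client and server
// entirely in memory over bufconn.Listener.
lis := bufconn.Listen(1 << 20)
s := grpc.NewServer()
go s.Serve(lis)
defer s.Stop()

conn, err := grpc.Dial("bufconn",
	grpc.WithInsecure(),
	grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
		return lis.Dial() // hand the client half of the buffered connection to gRPC
	}),
)
if err != nil {
	// handle dial error
}
defer conn.Close()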
+ * + */ + +package bufconn + +import ( + "fmt" + "io" + "net" + "reflect" + "testing" + "time" +) + +func testRW(r io.Reader, w io.Writer) error { + for i := 0; i < 20; i++ { + d := make([]byte, i) + for j := 0; j < i; j++ { + d[j] = byte(i - j) + } + var rn int + var rerr error + b := make([]byte, i) + done := make(chan struct{}) + go func() { + for rn < len(b) && rerr == nil { + var x int + x, rerr = r.Read(b[rn:]) + rn += x + } + close(done) + }() + wn, werr := w.Write(d) + if wn != i || werr != nil { + return fmt.Errorf("%v: w.Write(%v) = %v, %v; want %v, nil", i, d, wn, werr, i) + } + select { + case <-done: + case <-time.After(500 * time.Millisecond): + return fmt.Errorf("%v: r.Read never returned", i) + } + if rn != i || rerr != nil { + return fmt.Errorf("%v: r.Read = %v, %v; want %v, nil", i, rn, rerr, i) + } + if !reflect.DeepEqual(b, d) { + return fmt.Errorf("%v: r.Read read %v; want %v", i, b, d) + } + } + return nil +} + +func TestPipe(t *testing.T) { + p := newPipe(10) + if err := testRW(p, p); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestPipeClose(t *testing.T) { + p := newPipe(10) + p.Close() + if _, err := p.Write(nil); err != io.ErrClosedPipe { + t.Fatalf("p.Write = _, %v; want _, %v", err, io.ErrClosedPipe) + } + if _, err := p.Read(nil); err != io.ErrClosedPipe { + t.Fatalf("p.Read = _, %v; want _, %v", err, io.ErrClosedPipe) + } +} + +func TestConn(t *testing.T) { + p1, p2 := newPipe(10), newPipe(10) + c1, c2 := &conn{p1, p2}, &conn{p2, p1} + + if err := testRW(c1, c2); err != nil { + t.Fatalf(err.Error()) + } + if err := testRW(c2, c1); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestListener(t *testing.T) { + l := Listen(7) + var s net.Conn + var serr error + done := make(chan struct{}) + go func() { + s, serr = l.Accept() + close(done) + }() + c, cerr := l.Dial() + <-done + if cerr != nil || serr != nil { + t.Fatalf("cerr = %v, serr = %v; want nil, nil", cerr, serr) + } + if err := testRW(c, s); err != nil { + t.Fatalf(err.Error()) + } + if err := testRW(s, c); err != nil { + t.Fatalf(err.Error()) + } +} + +func TestCloseWhileDialing(t *testing.T) { + l := Listen(7) + var c net.Conn + var err error + done := make(chan struct{}) + go func() { + c, err = l.Dial() + close(done) + }() + l.Close() + <-done + if c != nil || err != errClosed { + t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) + } +} + +func TestCloseWhileAccepting(t *testing.T) { + l := Listen(7) + var c net.Conn + var err error + done := make(chan struct{}) + go func() { + c, err = l.Accept() + close(done) + }() + l.Close() + <-done + if c != nil || err != errClosed { + t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) + } +} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go index c88756edcc2b..9401b1dd8186 100644 --- a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: perf.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: codec_perf/perf.proto /* Package codec_perf is a generated protocol buffer package. 
It is generated from these files: - perf.proto + codec_perf/perf.proto It has these top-level messages: Buffer @@ -51,13 +50,13 @@ func init() { proto.RegisterType((*Buffer)(nil), "codec.perf.Buffer") } -func init() { proto.RegisterFile("perf.proto", fileDescriptor0) } +func init() { proto.RegisterFile("codec_perf/perf.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 73 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x48, 0x2d, 0x4a, - 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4a, 0xce, 0x4f, 0x49, 0x4d, 0xd6, 0x03, 0x89, - 0x28, 0xc9, 0x70, 0xb1, 0x39, 0x95, 0xa6, 0xa5, 0xa5, 0x16, 0x09, 0x09, 0x71, 0xb1, 0x24, 0xe5, - 0xa7, 0x54, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x80, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x3a, 0x58, 0x92, 0x53, 0x36, 0x00, 0x00, 0x00, + // 78 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0xce, 0x4f, 0x49, + 0x4d, 0x8e, 0x2f, 0x48, 0x2d, 0x4a, 0xd3, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x5c, 0x60, 0x61, 0x3d, 0x90, 0x88, 0x92, 0x0c, 0x17, 0x9b, 0x53, 0x69, 0x5a, 0x5a, 0x6a, 0x91, + 0x90, 0x10, 0x17, 0x4b, 0x52, 0x7e, 0x4a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, + 0x0d, 0x08, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x4c, 0x5f, 0x41, 0x00, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto index 4cf561f7f1e8..f42dbcafe0b4 100644 --- a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto +++ b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Messages used for performance tests that may not reference grpc directly for // reasons of import cycles. syntax = "proto2"; diff --git a/vendor/google.golang.org/grpc/test/end2end_test.go b/vendor/google.golang.org/grpc/test/end2end_test.go index 6a36b6882e93..ad942ef79c40 100644 --- a/vendor/google.golang.org/grpc/test/end2end_test.go +++ b/vendor/google.golang.org/grpc/test/end2end_test.go @@ -1,37 +1,25 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -package grpc_test +//go:generate protoc --go_out=plugins=grpc:. codec_perf/perf.proto +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +package test import ( "bytes" @@ -40,15 +28,14 @@ import ( "flag" "fmt" "io" - "log" "math" "net" "os" "reflect" "runtime" - "sort" "strings" "sync" + "sync/atomic" "syscall" "testing" "time" @@ -59,17 +46,24 @@ import ( "golang.org/x/net/http2" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" + _ "google.golang.org/grpc/grpclog/glogger" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + _ "google.golang.org/grpc/resolver/passthrough" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/leakcheck" + "google.golang.org/grpc/testdata" ) var ( @@ -109,7 +103,7 @@ var ( }) ) -var raceMode bool // set by race_test.go in race mode +var raceMode bool // set by race.go in race mode type testServer struct { security string // indicate the authentication protocol used by this server. @@ -117,6 +111,7 @@ type testServer struct { setAndSendHeader bool // whether to call setHeader and sendHeader. setHeaderOnly bool // whether to only call setHeader, not sendHeader. multipleSetTrailer bool // whether to call setTrailer multiple times. + unaryCallSleepTime time.Duration } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { @@ -212,7 +207,7 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* } } // Simulate some service delay. 
- time.Sleep(time.Second) + time.Sleep(s.unaryCallSleepTime) payload, err := newPayload(in.GetResponseType(), in.GetResponseSize()) if err != nil { @@ -309,6 +304,10 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ return nil } if err != nil { + // to facilitate testSvrWriteStatusEarlyWrite + if grpc.Code(err) == codes.ResourceExhausted { + return grpc.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) + } return err } cs := in.GetResponseParameters() @@ -325,6 +324,10 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: payload, }); err != nil { + // to facilitate testSvrWriteStatusEarlyWrite + if grpc.Code(err) == codes.ResourceExhausted { + return grpc.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) + } return err } } @@ -366,14 +369,13 @@ func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServ return nil } -const tlsDir = "testdata/" - type env struct { - name string - network string // The type of network such as tcp, unix, etc. - security string // The security protocol such as TLS, SSH, etc. - httpHandler bool // whether to use the http.Handler ServerTransport; requires TLS - balancer bool // whether to use balancer + name string + network string // The type of network such as tcp, unix, etc. + security string // The security protocol such as TLS, SSH, etc. + httpHandler bool // whether to use the http.Handler ServerTransport; requires TLS + balancer string // One of "roundrobin", "pickfirst", "v1", or "". + customDialer func(string, string, time.Duration) (net.Conn, error) } func (e env) runnable() bool { @@ -384,18 +386,20 @@ func (e env) runnable() bool { } func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) { + if e.customDialer != nil { + return e.customDialer(e.network, addr, timeout) + } return net.DialTimeout(e.network, addr, timeout) } var ( - tcpClearEnv = env{name: "tcp-clear", network: "tcp", balancer: true} - tcpTLSEnv = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: true} - unixClearEnv = env{name: "unix-clear", network: "unix", balancer: true} - unixTLSEnv = env{name: "unix-tls", network: "unix", security: "tls", balancer: true} - handlerEnv = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: true} - noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls", balancer: false} - // TODO add handlerEnv back when ServeHTTP is stable. 
- allEnv = []env{tcpClearEnv, tcpTLSEnv, unixClearEnv, unixTLSEnv /*handlerEnv,*/, noBalancerEnv} + tcpClearEnv = env{name: "tcp-clear-v1-balancer", network: "tcp", balancer: "v1"} + tcpTLSEnv = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls", balancer: "v1"} + tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "roundrobin"} + tcpTLSRREnv = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "roundrobin"} + handlerEnv = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "roundrobin"} + noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"} + allEnv = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv} ) var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.") @@ -431,20 +435,34 @@ type test struct { cancel context.CancelFunc // Configurable knobs, after newTest returns: - testServer testpb.TestServiceServer // nil means none - healthServer *health.Server // nil means disabled - maxStream uint32 - tapHandle tap.ServerInHandle - maxMsgSize int - userAgent string - clientCompression bool - serverCompression bool - unaryClientInt grpc.UnaryClientInterceptor - streamClientInt grpc.StreamClientInterceptor - unaryServerInt grpc.UnaryServerInterceptor - streamServerInt grpc.StreamServerInterceptor - unknownHandler grpc.StreamHandler - sc <-chan grpc.ServiceConfig + testServer testpb.TestServiceServer // nil means none + healthServer *health.Server // nil means disabled + maxStream uint32 + tapHandle tap.ServerInHandle + maxMsgSize *int + maxClientReceiveMsgSize *int + maxClientSendMsgSize *int + maxServerReceiveMsgSize *int + maxServerSendMsgSize *int + userAgent string + clientCompression bool + serverCompression bool + unaryClientInt grpc.UnaryClientInterceptor + streamClientInt grpc.StreamClientInterceptor + unaryServerInt grpc.UnaryServerInterceptor + streamServerInt grpc.StreamServerInterceptor + unknownHandler grpc.StreamHandler + sc <-chan grpc.ServiceConfig + customCodec grpc.Codec + serverInitialWindowSize int32 + serverInitialConnWindowSize int32 + clientInitialWindowSize int32 + clientInitialConnWindowSize int32 + perRPCCreds credentials.PerRPCCredentials + + // All test dialing is blocking by default. Set this to true if dial + // should be non-blocking. + nonBlockingDial bool // srv and srvAddr are set once startServer is called. 
srv *grpc.Server @@ -491,8 +509,14 @@ func (te *test) startServer(ts testpb.TestServiceServer) { te.testServer = ts te.t.Logf("Running test in %s environment...", te.e.name) sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} - if te.maxMsgSize > 0 { - sopts = append(sopts, grpc.MaxMsgSize(te.maxMsgSize)) + if te.maxMsgSize != nil { + sopts = append(sopts, grpc.MaxMsgSize(*te.maxMsgSize)) + } + if te.maxServerReceiveMsgSize != nil { + sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize)) + } + if te.maxServerSendMsgSize != nil { + sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize)) } if te.tapHandle != nil { sopts = append(sopts, grpc.InTapHandle(te.tapHandle)) @@ -512,6 +536,12 @@ func (te *test) startServer(ts testpb.TestServiceServer) { if te.unknownHandler != nil { sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler)) } + if te.serverInitialWindowSize > 0 { + sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize)) + } + if te.serverInitialConnWindowSize > 0 { + sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize)) + } la := "localhost:0" switch te.e.network { case "unix": @@ -524,7 +554,7 @@ func (te *test) startServer(ts testpb.TestServiceServer) { } switch te.e.security { case "tls": - creds, err := credentials.NewServerTLSFromFile(tlsDir+"server1.pem", tlsDir+"server1.key") + creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { te.t.Fatalf("Failed to generate credentials %v", err) } @@ -534,6 +564,9 @@ func (te *test) startServer(ts testpb.TestServiceServer) { case "clientTimeoutCreds": sopts = append(sopts, grpc.Creds(&clientTimeoutCreds{})) } + if te.customCodec != nil { + sopts = append(sopts, grpc.CustomCodec(te.customCodec)) + } s := grpc.NewServer(sopts...) te.srv = s if te.e.httpHandler { @@ -585,12 +618,18 @@ func (te *test) clientConn() *grpc.ClientConn { if te.streamClientInt != nil { opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt)) } - if te.maxMsgSize > 0 { - opts = append(opts, grpc.WithMaxMsgSize(te.maxMsgSize)) + if te.maxMsgSize != nil { + opts = append(opts, grpc.WithMaxMsgSize(*te.maxMsgSize)) + } + if te.maxClientReceiveMsgSize != nil { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize))) + } + if te.maxClientSendMsgSize != nil { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize))) } switch te.e.security { case "tls": - creds, err := credentials.NewClientTLSFromFile(tlsDir+"ca.pem", "x.test.youtube.com") + creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { te.t.Fatalf("Failed to load credentials: %v", err) } @@ -602,13 +641,39 @@ func (te *test) clientConn() *grpc.ClientConn { default: opts = append(opts, grpc.WithInsecure()) } - if te.e.balancer { + // TODO(bar) switch balancer case "pickfirst". 
+ var scheme string + switch te.e.balancer { + case "v1": opts = append(opts, grpc.WithBalancer(grpc.RoundRobin(nil))) + case "roundrobin": + rr := balancer.Get("roundrobin") + if rr == nil { + te.t.Fatalf("got nil when trying to get roundrobin balancer builder") + } + opts = append(opts, grpc.WithBalancerBuilder(rr)) + scheme = "passthrough:///" + } + if te.clientInitialWindowSize > 0 { + opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize)) + } + if te.clientInitialConnWindowSize > 0 { + opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize)) + } + if te.perRPCCreds != nil { + opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds)) + } + if te.customCodec != nil { + opts = append(opts, grpc.WithCodec(te.customCodec)) + } + if !te.nonBlockingDial && te.srvAddr != "" { + // Only do a blocking dial if server is up. + opts = append(opts, grpc.WithBlock()) } var err error - te.cc, err = grpc.Dial(te.srvAddr, opts...) + te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...) if err != nil { - te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) + te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err) } return te.cc } @@ -634,8 +699,55 @@ func (te *test) withServerTester(fn func(st *serverTester)) { fn(st) } +type lazyConn struct { + net.Conn + beLazy int32 +} + +func (l *lazyConn) Write(b []byte) (int, error) { + if atomic.LoadInt32(&(l.beLazy)) == 1 { + // The sleep duration here needs to less than the leakCheck deadline. + time.Sleep(time.Second) + } + return l.Conn.Write(b) +} + +func TestContextDeadlineNotIgnored(t *testing.T) { + defer leakcheck.Check(t) + e := noBalancerEnv + var lc *lazyConn + e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, timeout) + if err != nil { + return nil, err + } + lc = &lazyConn{Conn: conn} + return lc, nil + } + + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + atomic.StoreInt32(&(lc.beLazy), 1) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + t1 := time.Now() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err) + } + if time.Since(t1) > 2*time.Second { + t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline") + } +} + func TestTimeoutOnDeadServer(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testTimeoutOnDeadServer(t, e) } @@ -658,9 +770,10 @@ func testTimeoutOnDeadServer(t *testing.T, e env) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srv.Stop() - ctx, _ := context.WithTimeout(context.Background(), time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)) - if e.balancer && grpc.Code(err) != codes.DeadlineExceeded { + cancel() + if e.balancer != "" && grpc.Code(err) != codes.DeadlineExceeded { // If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error, // the error will be an internal error. 
t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded) @@ -669,7 +782,7 @@ func testTimeoutOnDeadServer(t *testing.T, e env) { } func TestServerGracefulStopIdempotent(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -690,7 +803,7 @@ func testServerGracefulStopIdempotent(t *testing.T, e env) { } func TestServerGoAway(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -718,11 +831,12 @@ func testServerGoAway(t *testing.T, e env) { }() // Loop until the server side GoAway signal is propagated to the client. for { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err == nil { - continue + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && grpc.Code(err) != codes.DeadlineExceeded { + cancel() + break } - break + cancel() } // A new RPC should fail. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable && grpc.Code(err) != codes.Internal { @@ -733,7 +847,7 @@ func testServerGoAway(t *testing.T, e env) { } func TestServerGoAwayPendingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -771,11 +885,12 @@ func testServerGoAwayPendingRPC(t *testing.T, e env) { }() // Loop until the server side GoAway signal is propagated to the client. for { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err == nil { - continue + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break } - break + cancel() } respParam := []*testpb.ResponseParameters{ { @@ -804,7 +919,7 @@ func testServerGoAwayPendingRPC(t *testing.T, e env) { } func TestServerMultipleGoAwayPendingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -847,11 +962,12 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { }() // Loop until the server side GoAway signal is propagated to the client. 
for { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err == nil { - continue + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break } - break + cancel() } select { case <-ch1: @@ -891,7 +1007,7 @@ func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { } func TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -929,7 +1045,7 @@ func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { } func TestConcurrentServerStopAndGoAway(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -966,11 +1082,12 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) { }() // Loop until the server side GoAway signal is propagated to the client. for { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err == nil { - continue + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); err != nil { + cancel() + break } - break + cancel() } // Stop the server and close all the connections. te.srv.Stop() @@ -998,7 +1115,7 @@ func testConcurrentServerStopAndGoAway(t *testing.T, e env) { } func TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1022,7 +1139,7 @@ func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { te.srv.GracefulStop() close(done) }() - time.Sleep(time.Second) + time.Sleep(50 * time.Millisecond) cc.Close() timeout := time.NewTimer(time.Second) select { @@ -1033,7 +1150,7 @@ func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { } func TestFailFast(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testFailFast(t, e) } @@ -1077,16 +1194,10 @@ func testFailFast(t *testing.T, e env) { awaitNewConnLogOutput() } -func TestServiceConfig(t *testing.T) { - defer leakCheck(t)() - for _, e := range listTestEnv() { - testServiceConfig(t, e) - } -} - -func testServiceConfig(t *testing.T, e env) { +func testServiceConfigSetup(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { te := newTest(t, e) - ch := make(chan grpc.ServiceConfig) + // We write before read. 
+ ch := make(chan grpc.ServiceConfig, 1) te.sc = ch te.userAgent = testAppUA te.declareLogNoise( @@ -1095,48 +1206,64 @@ func testServiceConfig(t *testing.T, e env) { "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) + return te, ch +} + +func newBool(b bool) (a *bool) { + return &b +} + +func newInt(b int) (a *int) { + return &b +} + +func newDuration(b time.Duration) (a *time.Duration) { + a = new(time.Duration) + *a = b + return +} + +func TestServiceConfigGetMethodConfig(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testGetMethodConfig(t, e) + } +} + +func testGetMethodConfig(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) defer te.tearDown() - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - mc := grpc.MethodConfig{ - WaitForReady: true, - Timeout: time.Millisecond, - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - }() + mc1 := grpc.MethodConfig{ + WaitForReady: newBool(true), + Timeout: newDuration(time.Millisecond), + } + mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } - if _, err := tc.FullDuplexCall(context.Background()); grpc.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - wg.Wait() - // Generate a service config update. - mc := grpc.MethodConfig{ - WaitForReady: false, - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ + + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc = grpc.ServiceConfig{ Methods: m, } ch <- sc - // Loop until the new update becomes effective. + // Wait for the new service config to propagate. 
for { - if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) == codes.DeadlineExceeded { continue } break @@ -1145,156 +1272,746 @@ func testServiceConfig(t *testing.T, e env) { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) } - if _, err := tc.FullDuplexCall(context.Background()); grpc.Code(err) != codes.Unavailable { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.Unavailable) - } } -func TestTap(t *testing.T) { - defer leakCheck(t)() +func TestServiceConfigWaitForReady(t *testing.T) { + defer leakcheck.Check(t) for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testTap(t, e) + testServiceConfigWaitForReady(t, e) } } -type myTap struct { - cnt int -} +func testServiceConfigWaitForReady(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) + defer te.tearDown() -func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) { - if info != nil { - if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" { - t.cnt++ - } else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" { - return nil, fmt.Errorf("tap error") - } + // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + WaitForReady: newBool(false), + Timeout: newDuration(time.Millisecond), } - return ctx, nil -} - -func testTap(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - ttap := &myTap{} - te.tapHandle = ttap.handle - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) - if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } - if ttap.cnt != 1 { - t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt) + if _, err := tc.FullDuplexCall(context.Background(), grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31) - if err != nil { - t.Fatal(err) + // Generate a service config update. + // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
+ mc.WaitForReady = newBool(true) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, } + ch <- sc - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseSize: proto.Int32(45), - Payload: payload, + // Wait for the new service config to take effect. + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + for { + if !*mc.WaitForReady { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") + continue + } + break } - if _, err := tc.UnaryCall(context.Background(), req); grpc.Code(err) != codes.Unavailable { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } -} - -func healthCheck(d time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) { - ctx, _ := context.WithTimeout(context.Background(), d) - hc := healthpb.NewHealthClient(cc) - req := &healthpb.HealthCheckRequest{ - Service: serviceName, + if _, err := tc.FullDuplexCall(context.Background()); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } - return hc.Check(ctx, req) } -func TestHealthCheckOnSuccess(t *testing.T) { - defer leakCheck(t)() +func TestServiceConfigTimeout(t *testing.T) { + defer leakcheck.Check(t) for _, e := range listTestEnv() { - testHealthCheckOnSuccess(t, e) + testServiceConfigTimeout(t, e) } } -func testHealthCheckOnSuccess(t *testing.T, e env) { - te := newTest(t, e) - hs := health.NewServer() - hs.SetServingStatus("grpc.health.v1.Health", 1) - te.healthServer = hs - te.startServer(&testServer{security: e.security}) +func testServiceConfigTimeout(t *testing.T, e env) { + te, ch := testServiceConfigSetup(t, e) defer te.tearDown() - cc := te.clientConn() - if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); err != nil { - t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. 
+ mc := grpc.MethodConfig{ + Timeout: newDuration(time.Hour), } -} - -func TestHealthCheckOnFailure(t *testing.T) { - defer leakCheck(t)() - for _, e := range listTestEnv() { - testHealthCheckOnFailure(t, e) + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, } -} - -func testHealthCheckOnFailure(t *testing.T, e env) { - defer leakCheck(t)() - te := newTest(t, e) - te.declareLogNoise( - "Failed to dial ", - "grpc: the client connection is closing; please retry", - ) - hs := health.NewServer() - hs.SetServingStatus("grpc.health.v1.HealthCheck", 1) - te.healthServer = hs - te.startServer(&testServer{security: e.security}) - defer te.tearDown() + ch <- sc cc := te.clientConn() - wantErr := grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") - if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1.Health"); !reflect.DeepEqual(err, wantErr) { - t.Fatalf("Health/Check(_, _) = _, %v, want _, error code %s", err, codes.DeadlineExceeded) + tc := testpb.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } - awaitNewConnLogOutput() -} + cancel() + ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() -func TestHealthCheckOff(t *testing.T) { - defer leakCheck(t)() - for _, e := range listTestEnv() { - // TODO(bradfitz): Temporarily skip this env due to #619. - if e.name == "handler-tls" { + // Generate a service config update. + // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc.Timeout = newDuration(time.Nanosecond) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. 
+ mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + for { + if *mc.Timeout != time.Nanosecond { + time.Sleep(100 * time.Millisecond) + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") continue } - testHealthCheckOff(t, e) + break + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.FullDuplexCall(ctx, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() } -func testHealthCheckOff(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - want := grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1.Health") - if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !reflect.DeepEqual(err, want) { - t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) +func TestServiceConfigMaxMsgSize(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testServiceConfigMaxMsgSize(t, e) } } -func TestUnknownHandler(t *testing.T) { - defer leakCheck(t)() - // An example unknownHandler that returns a different code and a different method, making sure that we do not - // expose what methods are implemented to a client that is not authenticated. +func testServiceConfigMaxMsgSize(t *testing.T, e env) { + // Setting up values and objects shared across all test cases. + const smallSize = 1 + const largeSize = 1024 + const extraLargeSize = 2048 + + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + + mc := grpc.MethodConfig{ + MaxReqSize: newInt(extraLargeSize), + MaxRespSize: newInt(extraLargeSize), + } + + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te1, ch1 := testServiceConfigSetup(t, e) + te1.startServer(&testServer{security: e.security}) + defer te1.tearDown() + + ch1 <- sc + tc := testpb.NewTestServiceClient(te1.clientConn()) + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(extraLargeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. 
+ req.Payload = extraLargePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(extraLargeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + stream, err := tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = extraLargePayload + stream, err = tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te2, ch2 := testServiceConfigSetup(t, e) + te2.maxClientReceiveMsgSize = newInt(1024) + te2.maxClientSendMsgSize = newInt(1024) + te2.startServer(&testServer{security: e.security}) + defer te2.tearDown() + ch2 <- sc + tc = testpb.NewTestServiceClient(te2.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = proto.Int32(int32(largeSize)) + + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te2.ctx) + respParam[0].Size = proto.Int32(int32(largeSize)) + sreq.Payload = smallPayload + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te2.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te3, ch3 := testServiceConfigSetup(t, e) + te3.maxClientReceiveMsgSize = newInt(4096) + te3.maxClientSendMsgSize = newInt(4096) + te3.startServer(&testServer{security: e.security}) + defer te3.tearDown() + ch3 <- sc + tc = testpb.NewTestServiceClient(te3.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = proto.Int32(int32(largeSize)) + + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.ResponseSize = proto.Int32(int32(extraLargeSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.Payload = extraLargePayload + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam[0].Size = proto.Int32(int32(largeSize)) + sreq.Payload = smallPayload + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want ", stream, err) + } + + respParam[0].Size = proto.Int32(int32(extraLargeSize)) + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + sreq.Payload = extraLargePayload + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeClientDefault(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeClientDefault(t, e) + } +} + +func testMaxMsgSizeClientDefault(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 4 * 1024 * 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC recv. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeClientAPI(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeClientAPI(t, e) + } +} + +func testMaxMsgSizeClientAPI(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + // To avoid error on server side. 
+ te.maxServerSendMsgSize = newInt(5 * 1024 * 1024) + te.maxClientReceiveMsgSize = newInt(1024) + te.maxClientSendMsgSize = newInt(1024) + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC recv. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC recv. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} + +func TestMaxMsgSizeServerAPI(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testMaxMsgSizeServerAPI(t, e) + } +} + +func testMaxMsgSizeServerAPI(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.maxServerReceiveMsgSize = newInt(1024) + te.maxServerSendMsgSize = newInt(1024) + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + const smallSize = 1 + const largeSize = 1024 + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(int32(largeSize)), + Payload: smallPayload, + } + // Test for unary RPC send. + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC recv. + req.Payload = largePayload + req.ResponseSize = proto.Int32(int32(smallSize)) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(largeSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: smallPayload, + } + + // Test for streaming RPC send. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. 
+ respParam[0].Size = proto.Int32(int32(smallSize)) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} + +func TestTap(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testTap(t, e) + } +} + +type myTap struct { + cnt int +} + +func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) { + if info != nil { + if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" { + t.cnt++ + } else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" { + return nil, fmt.Errorf("tap error") + } + } + return ctx, nil +} + +func testTap(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + ttap := &myTap{} + te.tapHandle = ttap.handle + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + if ttap.cnt != 1 { + t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt) + } + + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31) + if err != nil { + t.Fatal(err) + } + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseSize: proto.Int32(45), + Payload: payload, + } + if _, err := tc.UnaryCall(context.Background(), req); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + } +} + +func healthCheck(d time.Duration, cc *grpc.ClientConn, serviceName string) (*healthpb.HealthCheckResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + hc := healthpb.NewHealthClient(cc) + req := &healthpb.HealthCheckRequest{ + Service: serviceName, + } + return hc.Check(ctx, req) +} + +func TestHealthCheckOnSuccess(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testHealthCheckOnSuccess(t, e) + } +} + +func testHealthCheckOnSuccess(t *testing.T, e env) { + te := newTest(t, e) + hs := health.NewServer() + hs.SetServingStatus("grpc.health.v1.Health", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + if _, err := healthCheck(1*time.Second, cc, "grpc.health.v1.Health"); err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } +} + +func TestHealthCheckOnFailure(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testHealthCheckOnFailure(t, e) + } +} + +func testHealthCheckOnFailure(t *testing.T, e env) { + defer leakcheck.Check(t) + te := newTest(t, e) + te.declareLogNoise( + "Failed to dial ", + "grpc: the client connection is closing; please retry", + ) + hs := 
health.NewServer() + hs.SetServingStatus("grpc.health.v1.HealthCheck", 1) + te.healthServer = hs + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + wantErr := grpc.Errorf(codes.DeadlineExceeded, "context deadline exceeded") + if _, err := healthCheck(0*time.Second, cc, "grpc.health.v1.Health"); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, error code %s", err, codes.DeadlineExceeded) + } + awaitNewConnLogOutput() +} + +func TestHealthCheckOff(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testHealthCheckOff(t, e) + } +} + +func testHealthCheckOff(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + want := grpc.Errorf(codes.Unimplemented, "unknown service grpc.health.v1.Health") + if _, err := healthCheck(1*time.Second, te.clientConn(), ""); !reflect.DeepEqual(err, want) { + t.Fatalf("Health/Check(_, _) = _, %v, want _, %v", err, want) + } +} + +func TestUnknownHandler(t *testing.T) { + defer leakcheck.Check(t) + // An example unknownHandler that returns a different code and a different method, making sure that we do not + // expose what methods are implemented to a client that is not authenticated. unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { return grpc.Errorf(codes.Unauthenticated, "user unauthenticated") } @@ -1319,7 +2036,7 @@ func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) } func TestHealthCheckServingStatus(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testHealthCheckServingStatus(t, e) } @@ -1364,7 +2081,7 @@ func testHealthCheckServingStatus(t *testing.T, e env) { } func TestErrorChanNoIO(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testErrorChanNoIO(t, e) } @@ -1382,7 +2099,7 @@ func testErrorChanNoIO(t *testing.T, e env) { } func TestEmptyUnaryWithUserAgent(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testEmptyUnaryWithUserAgent(t, e) } @@ -1409,8 +2126,13 @@ func testEmptyUnaryWithUserAgent(t *testing.T, e env) { } func TestFailedEmptyUnary(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { + if e.name == "handler-tls" { + // This test covers status details, but + // Grpc-Status-Details-Bin is not support in handler_server. + continue + } testFailedEmptyUnary(t, e) } } @@ -1430,7 +2152,7 @@ func testFailedEmptyUnary(t *testing.T, e env) { } func TestLargeUnary(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testLargeUnary(t, e) } @@ -1466,8 +2188,9 @@ func testLargeUnary(t *testing.T, e env) { } } +// Test backward-compatibility API for setting msg size limit. 
func TestExceedMsgLimit(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testExceedMsgLimit(t, e) } @@ -1475,12 +2198,12 @@ func TestExceedMsgLimit(t *testing.T) { func testExceedMsgLimit(t *testing.T, e env) { te := newTest(t, e) - te.maxMsgSize = 1024 + te.maxMsgSize = newInt(1024) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) - argSize := int32(te.maxMsgSize + 1) + argSize := int32(*te.maxMsgSize + 1) const smallSize = 1 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) @@ -1492,23 +2215,23 @@ func testExceedMsgLimit(t *testing.T, e env) { t.Fatal(err) } - // test on server side for unary RPC + // Test on server side for unary RPC. req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(smallSize), Payload: payload, } - if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.Internal { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.Internal) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } - // test on client side for unary RPC - req.ResponseSize = proto.Int32(int32(te.maxMsgSize) + 1) + // Test on client side for unary RPC. + req.ResponseSize = proto.Int32(int32(*te.maxMsgSize) + 1) req.Payload = smallPayload - if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.Internal { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.Internal) + if _, err := tc.UnaryCall(context.Background(), req); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } - // test on server side for streaming RPC + // Test on server side for streaming RPC. stream, err := tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) @@ -1519,7 +2242,7 @@ func testExceedMsgLimit(t *testing.T, e env) { }, } - spayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(te.maxMsgSize+1)) + spayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(*te.maxMsgSize+1)) if err != nil { t.Fatal(err) } @@ -1532,28 +2255,28 @@ func testExceedMsgLimit(t *testing.T, e env) { if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } - if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.Internal { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.Internal) + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } - // test on client side for streaming RPC + // Test on client side for streaming RPC. 
stream, err = tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } - respParam[0].Size = proto.Int32(int32(te.maxMsgSize) + 1) + respParam[0].Size = proto.Int32(int32(*te.maxMsgSize) + 1) sreq.Payload = smallPayload if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } - if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.Internal { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.Internal) + if _, err := stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } } func TestPeerClientSide(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testPeerClientSide(t, e) } @@ -1593,7 +2316,7 @@ func testPeerClientSide(t *testing.T, e env) { // doesn't cause a segmentation fault. // issue#1141 https://github.com/grpc/grpc-go/issues/1141 func TestPeerNegative(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testPeerNegative(t, e) } @@ -1612,8 +2335,64 @@ func testPeerNegative(t *testing.T, e env) { tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) } +func TestPeerFailedRPC(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPeerFailedRPC(t, e) + } +} + +func testPeerFailedRPC(t *testing.T, e env) { + te := newTest(t, e) + te.maxServerReceiveMsgSize = newInt(1 * 1024) + te.startServer(&testServer{security: e.security}) + + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + + // first make a successful request to the server + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + + // make a second request that will be rejected by the server + const largeSize = 5 * 1024 + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + Payload: largePayload, + } + + peer := new(peer.Peer) + if _, err := tc.UnaryCall(context.Background(), req, grpc.Peer(peer)); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } else { + pa := peer.Addr.String() + if e.network == "unix" { + if pa != te.srvAddr { + t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) + } + return + } + _, pp, err := net.SplitHostPort(pa) + if err != nil { + t.Fatalf("Failed to parse address from peer.") + } + _, sp, err := net.SplitHostPort(te.srvAddr) + if err != nil { + t.Fatalf("Failed to parse address of test server.") + } + if pp != sp { + t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) + } + } +} + func TestMetadataUnaryRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testMetadataUnaryRPC(t, e) } @@ -1658,7 +2437,7 @@ func testMetadataUnaryRPC(t *testing.T, e env) { } func TestMultipleSetTrailerUnaryRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testMultipleSetTrailerUnaryRPC(t, e) } @@ -1696,7 +2475,7 @@ func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) { } func TestMultipleSetTrailerStreamingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) 
for _, e := range listTestEnv() { testMultipleSetTrailerStreamingRPC(t, e) } @@ -1728,7 +2507,7 @@ func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) { } func TestSetAndSendHeaderUnaryRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1771,7 +2550,7 @@ func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { } func TestMultipleSetHeaderUnaryRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1815,7 +2594,7 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { } func TestMultipleSetHeaderUnaryRPCError(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1858,7 +2637,7 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { } func TestSetAndSendHeaderStreamingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1902,7 +2681,7 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { } func TestMultipleSetHeaderStreamingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -1966,7 +2745,7 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { } func TestMultipleSetHeaderStreamingRPCError(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -2029,8 +2808,13 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { // TestMalformedHTTP2Metedata verfies the returned error when the client // sends an illegal metadata. func TestMalformedHTTP2Metadata(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { + if e.name == "handler-tls" { + // Failed with "server stops accepting new RPCs". + // Server stops accepting new RPCs when the client sends an illegal http2 header. + continue + } testMalformedHTTP2Metadata(t, e) } } @@ -2087,8 +2871,12 @@ func performOneRPC(t *testing.T, tc testpb.TestServiceClient, wg *sync.WaitGroup } func TestRetry(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { + if e.name == "handler-tls" { + // Fails with RST_STREAM / FLOW_CONTROL_ERROR + continue + } testRetry(t, e) } } @@ -2139,7 +2927,7 @@ func testRetry(t *testing.T, e env) { } func TestRPCTimeout(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testRPCTimeout(t, e) } @@ -2148,7 +2936,7 @@ func TestRPCTimeout(t *testing.T) { // TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism. 
func testRPCTimeout(t *testing.T, e env) { te := newTest(t, e) - te.startServer(&testServer{security: e.security}) + te.startServer(&testServer{security: e.security, unaryCallSleepTime: 50 * time.Millisecond}) defer te.tearDown() cc := te.clientConn() @@ -2168,15 +2956,16 @@ func testRPCTimeout(t *testing.T, e env) { Payload: payload, } for i := -1; i <= 10; i++ { - ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond) if _, err := tc.UnaryCall(ctx, req); grpc.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want , error code: %s", err, codes.DeadlineExceeded) } + cancel() } } func TestCancel(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testCancel(t, e) } @@ -2185,7 +2974,7 @@ func TestCancel(t *testing.T) { func testCancel(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise("grpc: the client connection is closing; please retry") - te.startServer(&testServer{security: e.security}) + te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second}) defer te.tearDown() cc := te.clientConn() @@ -2213,7 +3002,7 @@ func testCancel(t *testing.T, e env) { } func TestCancelNoIO(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testCancelNoIO(t, e) } @@ -2245,11 +3034,10 @@ func testCancelNoIO(t *testing.T, e env) { // succeeding. // TODO(bradfitz): add internal test hook for this (Issue 534) for { - ctx, cancelSecond := context.WithTimeout(context.Background(), 250*time.Millisecond) + ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond) _, err := tc.StreamingInputCall(ctx) cancelSecond() if err == nil { - time.Sleep(50 * time.Millisecond) continue } if grpc.Code(err) == codes.DeadlineExceeded { @@ -2260,21 +3048,19 @@ func testCancelNoIO(t *testing.T, e env) { // If there are any RPCs in flight before the client receives // the max streams setting, let them be expired. // TODO(bradfitz): add internal test hook for this (Issue 534) - time.Sleep(500 * time.Millisecond) + time.Sleep(50 * time.Millisecond) - ch := make(chan struct{}) go func() { - defer close(ch) - - // This should be blocked until the 1st is canceled. - ctx, cancelThird := context.WithTimeout(context.Background(), 2*time.Second) - if _, err := tc.StreamingInputCall(ctx); err != nil { - t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) - } - cancelThird() + time.Sleep(50 * time.Millisecond) + cancelFirst() }() - cancelFirst() - <-ch + + // This should be blocked until the 1st is canceled, then succeed. + ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond) + if _, err := tc.StreamingInputCall(ctx); err != nil { + t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) + } + cancelThird() } // The following tests the gRPC streaming RPC implementations. 
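// Throughout the hunks above, this patch swaps the file-local leakCheck(t)() closure
// for the vendored google.golang.org/grpc/test/leakcheck package and adds explicit
// cancel calls for the timeout contexts created in the timeout/cancel tests. A minimal,
// illustrative sketch of that combined pattern is shown here before the streaming-RPC
// tests continue below; the package clause, test name, and the 50ms timeout are
// assumptions of the sketch, not lines taken from this patch.
package test

import (
	"testing"
	"time"

	"golang.org/x/net/context"

	"google.golang.org/grpc/test/leakcheck"
)

func TestSketchDoesNotLeak(t *testing.T) {
	// Fails the test if gRPC-created goroutines are still running up to 10s after it ends,
	// per the leakcheck package added later in this patch.
	defer leakcheck.Check(t)

	// Mirrors the ctx/cancel fixes above: always release timeout contexts.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	_ = ctx // RPCs issued in the real tests would use this ctx.
}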
@@ -2285,7 +3071,7 @@ var ( ) func TestNoService(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testNoService(t, e) } @@ -2309,7 +3095,7 @@ func testNoService(t *testing.T, e env) { } func TestPingPong(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testPingPong(t, e) } @@ -2369,7 +3155,7 @@ func testPingPong(t *testing.T, e env) { } func TestMetadataStreamingRPC(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testMetadataStreamingRPC(t, e) } @@ -2403,32 +3189,35 @@ func testMetadataStreamingRPC(t *testing.T, e env) { if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } - var index int - for index < len(reqSizes) { - respParam := []*testpb.ResponseParameters{ - { - Size: proto.Int32(int32(respSizes[index])), - }, - } + err = func() error { + for index := 0; index < len(reqSizes); index++ { + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(respSizes[index])), + }, + } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) - if err != nil { - t.Fatal(err) - } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) + if err != nil { + return err + } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), - ResponseParameters: respParam, - Payload: payload, - } - if err := stream.Send(req); err != nil { - t.Errorf("%v.Send(%v) = %v, want ", stream, req, err) - return + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(req); err != nil { + return fmt.Errorf("%v.Send(%v) = %v, want ", stream, req, err) + } } - index++ - } + return nil + }() // Tell the server we're done sending args. stream.CloseSend() + if err != nil { + t.Error(err) + } }() for { if _, err := stream.Recv(); err != nil { @@ -2442,7 +3231,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { } func TestServerStreaming(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testServerStreaming(t, e) } @@ -2497,7 +3286,7 @@ func testServerStreaming(t *testing.T, e env) { } func TestFailedServerStreaming(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testFailedServerStreaming(t, e) } @@ -2554,7 +3343,7 @@ func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCa // Tests doing a bunch of concurrent streaming output calls. 
func TestServerStreamingConcurrent(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testServerStreamingConcurrent(t, e) } @@ -2611,26 +3400,52 @@ func testServerStreamingConcurrent(t *testing.T, e env) { } +func generatePayloadSizes() [][]int { + reqSizes := [][]int{ + {27182, 8, 1828, 45904}, + } + + num8KPayloads := 1024 + eightKPayloads := []int{} + for i := 0; i < num8KPayloads; i++ { + eightKPayloads = append(eightKPayloads, (1 << 13)) + } + reqSizes = append(reqSizes, eightKPayloads) + + num2MPayloads := 8 + twoMPayloads := []int{} + for i := 0; i < num2MPayloads; i++ { + twoMPayloads = append(twoMPayloads, (1 << 21)) + } + reqSizes = append(reqSizes, twoMPayloads) + + return reqSizes +} + func TestClientStreaming(t *testing.T) { - defer leakCheck(t)() - for _, e := range listTestEnv() { - testClientStreaming(t, e) + defer leakcheck.Check(t) + for _, s := range generatePayloadSizes() { + for _, e := range listTestEnv() { + testClientStreaming(t, e, s) + } } } -func testClientStreaming(t *testing.T, e env) { +func testClientStreaming(t *testing.T, e env, sizes []int) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) - stream, err := tc.StreamingInputCall(te.ctx) + ctx, cancel := context.WithTimeout(te.ctx, time.Second*30) + defer cancel() + stream, err := tc.StreamingInputCall(ctx) if err != nil { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) } var sum int - for _, s := range reqSizes { + for _, s := range sizes { payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s)) if err != nil { t.Fatal(err) @@ -2654,7 +3469,7 @@ func testClientStreaming(t *testing.T, e env) { } func TestClientStreamingError(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { if e.name == "handler-tls" { continue @@ -2697,7 +3512,7 @@ func testClientStreamingError(t *testing.T, e env) { } func TestExceedMaxStreamsLimit(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testExceedMaxStreamsLimit(t, e) } @@ -2723,11 +3538,11 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) { } // Loop until receiving the new max stream setting from the server. for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() _, err := tc.StreamingInputCall(ctx) if err == nil { - time.Sleep(time.Second) + time.Sleep(50 * time.Millisecond) continue } if grpc.Code(err) == codes.DeadlineExceeded { @@ -2740,8 +3555,12 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) { const defaultMaxStreamsClient = 100 func TestExceedDefaultMaxStreamsLimit(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { + if e.name == "handler-tls" { + // The default max stream limit in handler_server is not 100? + continue + } testExceedDefaultMaxStreamsLimit(t, e) } } @@ -2771,7 +3590,7 @@ func testExceedDefaultMaxStreamsLimit(t *testing.T, e env) { } // Trying to create one more should timeout. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() _, err := tc.StreamingInputCall(ctx) if err == nil || grpc.Code(err) != codes.DeadlineExceeded { @@ -2780,7 +3599,7 @@ func testExceedDefaultMaxStreamsLimit(t *testing.T, e env) { } func TestStreamsQuotaRecovery(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testStreamsQuotaRecovery(t, e) } @@ -2804,11 +3623,11 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { } // Loop until the new max stream setting is effective. for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() _, err := tc.StreamingInputCall(ctx) if err == nil { - time.Sleep(time.Second) + time.Sleep(50 * time.Millisecond) continue } if grpc.Code(err) == codes.DeadlineExceeded { @@ -2824,7 +3643,8 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { defer wg.Done() payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314) if err != nil { - t.Fatal(err) + t.Error(err) + return } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), @@ -2832,7 +3652,8 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { Payload: payload, } // No rpc should go through due to the max streams limit. - ctx, _ := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() if _, err := tc.UnaryCall(ctx, req, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded { t.Errorf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } @@ -2842,7 +3663,7 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { } func TestCompressServerHasNoSupport(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testCompressServerHasNoSupport(t, e) } @@ -2898,7 +3719,7 @@ func testCompressServerHasNoSupport(t *testing.T, e env) { } func TestCompressOK(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testCompressOK(t, e) } @@ -2958,7 +3779,7 @@ func testCompressOK(t *testing.T, e env) { } func TestUnaryClientInterceptor(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testUnaryClientInterceptor(t, e) } @@ -2986,7 +3807,7 @@ func testUnaryClientInterceptor(t *testing.T, e env) { } func TestStreamClientInterceptor(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testStreamClientInterceptor(t, e) } @@ -3027,7 +3848,7 @@ func testStreamClientInterceptor(t *testing.T, e env) { } func TestUnaryServerInterceptor(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testUnaryServerInterceptor(t, e) } @@ -3050,7 +3871,7 @@ func testUnaryServerInterceptor(t *testing.T, e env) { } func TestStreamServerInterceptor(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { // TODO(bradfitz): Temporarily skip this env due to #619. 
if e.name == "handler-tls" { @@ -3127,7 +3948,7 @@ func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInput } func TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testClientRequestBodyErrorUnexpectedEOF(t, e) } @@ -3151,7 +3972,7 @@ func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) { } func TestClientRequestBodyErrorCloseAfterLength(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testClientRequestBodyErrorCloseAfterLength(t, e) } @@ -3176,7 +3997,7 @@ func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) { } func TestClientRequestBodyErrorCancel(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testClientRequestBodyErrorCancel(t, e) } @@ -3213,7 +4034,7 @@ func testClientRequestBodyErrorCancel(t *testing.T, e env) { } func TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) { - defer leakCheck(t)() + defer leakcheck.Check(t) for _, e := range listTestEnv() { testClientRequestBodyErrorCancelStreamingInput(t, e) } @@ -3270,7 +4091,7 @@ func (c clientAlwaysFailCred) OverrideServerName(s string) error { } func TestDialWithBlockErrorOnBadCertificates(t *testing.T) { - te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: true}) + te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "v1"}) te.startServer(&testServer{security: te.e.security}) defer te.tearDown() @@ -3285,42 +4106,6 @@ func TestDialWithBlockErrorOnBadCertificates(t *testing.T) { } } -func TestFailFastRPCErrorOnBadCertificates(t *testing.T) { - te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: true}) - te.startServer(&testServer{security: te.e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); !strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { - te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg) - } -} - -func TestFailFastRPCWithNoBalancerErrorOnBadCertificates(t *testing.T) { - te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: false}) - te.startServer(&testServer{security: te.e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); !strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { - te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg) - } -} - -func TestNonFailFastRPCWithNoBalancerErrorOnBadCertificates(t *testing.T) { - te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: false}) - te.startServer(&testServer{security: te.e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); !strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { - te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg) - } -} - type clientTimeoutCreds struct { timeoutReturned bool } @@ -3346,7 +4131,7 @@ 
func (c *clientTimeoutCreds) OverrideServerName(s string) error { } func TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { - te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "clientTimeoutCreds", balancer: false}) + te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "clientTimeoutCreds", balancer: "v1"}) te.userAgent = testAppUA te.startServer(&testServer{security: te.e.security}) defer te.tearDown() @@ -3360,21 +4145,22 @@ func TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { } type serverDispatchCred struct { - ready chan struct{} - rawConn net.Conn + rawConnCh chan net.Conn } func newServerDispatchCred() *serverDispatchCred { return &serverDispatchCred{ - ready: make(chan struct{}), + rawConnCh: make(chan net.Conn, 1), } } func (c *serverDispatchCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c *serverDispatchCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - c.rawConn = rawConn - close(c.ready) + select { + case c.rawConnCh <- rawConn: + default: + } return nil, nil, credentials.ErrConnDispatched } func (c *serverDispatchCred) Info() credentials.ProtocolInfo { @@ -3387,12 +4173,11 @@ func (c *serverDispatchCred) OverrideServerName(s string) error { return nil } func (c *serverDispatchCred) getRawConn() net.Conn { - <-c.ready - return c.rawConn + return <-c.rawConnCh } func TestServerCredsDispatch(t *testing.T) { - lis, err := net.Listen("tcp", ":0") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } @@ -3407,17 +4192,65 @@ func TestServerCredsDispatch(t *testing.T) { } defer cc.Close() + rawConn := cred.getRawConn() + // Give grpc a chance to see the error and potentially close the connection. + // And check that connection is not closed after that. + time.Sleep(100 * time.Millisecond) // Check rawConn is not closed. - if n, err := cred.getRawConn().Write([]byte{0}); n <= 0 || err != nil { + if n, err := rawConn.Write([]byte{0}); n <= 0 || err != nil { t.Errorf("Read() = %v, %v; want n>0, ", n, err) } } +type clientFailCreds struct { + got string +} + +func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, nil, fmt.Errorf("client handshake fails with fatal error") +} +func (c *clientFailCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c *clientFailCreds) Clone() credentials.TransportCredentials { + return c +} +func (c *clientFailCreds) OverrideServerName(s string) error { + return nil +} + +// This test makes sure that failfast RPCs fail if client handshake fails with +// fatal errors. +func TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + defer lis.Close() + + cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{})) + if err != nil { + t.Fatalf("grpc.Dial(_) = %v", err) + } + defer cc.Close() + + tc := testpb.NewTestServiceClient(cc) + // This unary call should fail, but not timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(true)); grpc.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want ", err) + } +} + func TestFlowControlLogicalRace(t *testing.T) { // Test for a regression of https://github.com/grpc/grpc-go/issues/632, // and other flow control bugs. - defer leakCheck(t)() + defer leakcheck.Check(t) const ( itemCount = 100 @@ -3425,7 +4258,7 @@ func TestFlowControlLogicalRace(t *testing.T) { recvCount = 2 maxFailures = 3 - requestTimeout = time.Second + requestTimeout = time.Second * 5 ) requestCount := 10000 @@ -3433,7 +4266,7 @@ func TestFlowControlLogicalRace(t *testing.T) { requestCount = 1000 } - lis, err := net.Listen("tcp", ":0") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } @@ -3499,99 +4332,29 @@ func TestFlowControlLogicalRace(t *testing.T) { type flowControlLogicalRaceServer struct { testpb.TestServiceServer - itemSize int - itemCount int -} - -func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error { - for i := 0; i < s.itemCount; i++ { - err := srv.Send(&testpb.StreamingOutputCallResponse{ - Payload: &testpb.Payload{ - // Sending a large stream of data which the client reject - // helps to trigger some types of flow control bugs. - // - // Reallocating memory here is inefficient, but the stress it - // puts on the GC leads to more frequent flow control - // failures. The GC likely causes more variety in the - // goroutine scheduling orders. - Body: bytes.Repeat([]byte("a"), s.itemSize), - }, - }) - if err != nil { - return err - } - } - return nil -} - -// interestingGoroutines returns all goroutines we care about for the purpose -// of leak checking. It excludes testing or runtime ones. -func interestingGoroutines() (gs []string) { - buf := make([]byte, 2<<20) - buf = buf[:runtime.Stack(buf, true)] - for _, g := range strings.Split(string(buf), "\n\n") { - sl := strings.SplitN(g, "\n", 2) - if len(sl) != 2 { - continue - } - stack := strings.TrimSpace(sl[1]) - if strings.HasPrefix(stack, "testing.RunTests") { - continue - } - - if stack == "" || - strings.Contains(stack, "testing.Main(") || - strings.Contains(stack, "testing.tRunner(") || - strings.Contains(stack, "runtime.goexit") || - strings.Contains(stack, "created by runtime.gc") || - strings.Contains(stack, "created by runtime/trace.Start") || - strings.Contains(stack, "created by google3/base/go/log.init") || - strings.Contains(stack, "interestingGoroutines") || - strings.Contains(stack, "runtime.MHeap_Scavenger") || - strings.Contains(stack, "signal.signal_recv") || - strings.Contains(stack, "sigterm.handler") || - strings.Contains(stack, "runtime_mcall") || - strings.Contains(stack, "goroutine in C code") { - continue - } - gs = append(gs, g) - } - sort.Strings(gs) - return -} - -// leakCheck snapshots the currently-running goroutines and returns a -// function to be run at the end of tests to see whether any -// goroutines leaked. -func leakCheck(t testing.TB) func() { - orig := map[string]bool{} - for _, g := range interestingGoroutines() { - orig[g] = true - } - return func() { - // Loop, waiting for goroutines to shut down. - // Wait up to 10 seconds, but finish as quickly as possible. 
- deadline := time.Now().Add(10 * time.Second) - for { - var leaked []string - for _, g := range interestingGoroutines() { - if !orig[g] { - leaked = append(leaked, g) - } - } - if len(leaked) == 0 { - return - } - if time.Now().Before(deadline) { - time.Sleep(50 * time.Millisecond) - continue - } - for _, g := range leaked { - t.Errorf("Leaked goroutine: %v", g) - } - return + itemSize int + itemCount int +} + +func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error { + for i := 0; i < s.itemCount; i++ { + err := srv.Send(&testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + // Sending a large stream of data which the client reject + // helps to trigger some types of flow control bugs. + // + // Reallocating memory here is inefficient, but the stress it + // puts on the GC leads to more frequent flow control + // failures. The GC likely causes more variety in the + // goroutine scheduling orders. + Body: bytes.Repeat([]byte("a"), s.itemSize), + }, + }) + if err != nil { + return err } } + return nil } type lockingWriter struct { @@ -3657,10 +4420,6 @@ func logOutputHasContents(v []byte, wakeup chan<- bool) bool { return false } -func init() { - grpclog.SetLogger(log.New(testLogOutput, "", log.LstdFlags)) -} - var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering") func noop() {} @@ -3742,14 +4501,14 @@ func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallSer } // Start starts the server and creates a client connected to it. -func (ss *stubServer) Start() error { - lis, err := net.Listen("tcp", ":0") +func (ss *stubServer) Start(sopts []grpc.ServerOption) error { + lis, err := net.Listen("tcp", "localhost:0") if err != nil { - return fmt.Errorf(`net.Listen("tcp", ":0") = %v`, err) + return fmt.Errorf(`net.Listen("tcp", "localhost:0") = %v`, err) } ss.cleanups = append(ss.cleanups, func() { lis.Close() }) - s := grpc.NewServer() + s := grpc.NewServer(sopts...) testpb.RegisterTestServiceServer(s, ss) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) @@ -3782,7 +4541,7 @@ func TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { return &testpb.Empty{}, nil }, } - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer endpoint.Stop() @@ -3797,7 +4556,7 @@ func TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { return endpoint.client.EmptyCall(ctx, in) }, } - if err := proxy.Start(); err != nil { + if err := proxy.Start(nil); err != nil { t.Fatalf("Error starting proxy server: %v", err) } defer proxy.Stop() @@ -3845,7 +4604,7 @@ func TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { return nil }, } - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer endpoint.Stop() @@ -3861,7 +4620,7 @@ func TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { return doFDC(ctx, endpoint.client) }, } - if err := proxy.Start(); err != nil { + if err := proxy.Start(nil); err != nil { t.Fatalf("Error starting proxy server: %v", err) } defer proxy.Stop() @@ -3881,3 +4640,435 @@ func TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { t.Fatalf("doFDC(_, proxy.client) = %v; want nil", err) } } + +func TestStatsTagsAndTrace(t *testing.T) { + // Data added to context by client (typically in a stats handler). 
+ tags := []byte{1, 5, 2, 4, 3} + trace := []byte{5, 2, 1, 3, 4} + + // endpoint ensures Tags() and Trace() in context match those that were added + // by the client and returns an error if not. + endpoint := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) { + return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags) + } + if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) { + return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags) + } + if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) { + return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace) + } + if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) { + return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace) + } + return &testpb.Empty{}, nil + }, + } + if err := endpoint.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer endpoint.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + testCases := []struct { + ctx context.Context + want codes.Code + }{ + {ctx: ctx, want: codes.Internal}, + {ctx: stats.SetTags(ctx, tags), want: codes.Internal}, + {ctx: stats.SetTrace(ctx, trace), want: codes.Internal}, + {ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal}, + {ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK}, + } + + for _, tc := range testCases { + _, err := endpoint.client.EmptyCall(tc.ctx, &testpb.Empty{}) + if tc.want == codes.OK && err != nil { + t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err) + } + if s, ok := status.FromError(err); !ok || s.Code() != tc.want { + t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, ", tc.ctx, err, tc.want) + } + } +} + +func TestTapTimeout(t *testing.T) { + sopts := []grpc.ServerOption{ + grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) { + c, cancel := context.WithCancel(ctx) + // Call cancel instead of setting a deadline so we can detect which error + // occurred -- this cancellation (desired) or the client's deadline + // expired (indicating this cancellation did not affect the RPC). + time.AfterFunc(10*time.Millisecond, cancel) + return c, nil + }), + } + + ss := &stubServer{ + emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + <-ctx.Done() + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(sopts); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // This was known to be flaky; test several times. + for i := 0; i < 10; i++ { + // Set our own deadline in case the server hangs. 
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + res, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) + cancel() + if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled { + t.Fatalf("ss.client.EmptyCall(context.Background(), _) = %v, %v; want nil, ", res, err) + } + } +} + +type windowSizeConfig struct { + serverStream int32 + serverConn int32 + clientStream int32 + clientConn int32 +} + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} + +func TestConfigurableWindowSizeWithLargeWindow(t *testing.T) { + defer leakcheck.Check(t) + wc := windowSizeConfig{ + serverStream: 8 * 1024 * 1024, + serverConn: 12 * 1024 * 1024, + clientStream: 6 * 1024 * 1024, + clientConn: 8 * 1024 * 1024, + } + for _, e := range listTestEnv() { + testConfigurableWindowSize(t, e, wc) + } +} + +func TestConfigurableWindowSizeWithSmallWindow(t *testing.T) { + defer leakcheck.Check(t) + wc := windowSizeConfig{ + serverStream: 1, + serverConn: 1, + clientStream: 1, + clientConn: 1, + } + for _, e := range listTestEnv() { + testConfigurableWindowSize(t, e, wc) + } +} + +func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) { + te := newTest(t, e) + te.serverInitialWindowSize = wc.serverStream + te.serverInitialConnWindowSize = wc.serverConn + te.clientInitialWindowSize = wc.clientStream + te.clientInitialConnWindowSize = wc.clientConn + + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + stream, err := tc.FullDuplexCall(context.Background()) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + numOfIter := 11 + // Set message size to exhaust largest of window sizes. + messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1) + messageSize = max(messageSize, 64*1024) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize) + if err != nil { + t.Fatal(err) + } + respParams := []*testpb.ResponseParameters{ + { + Size: proto.Int32(messageSize), + }, + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParams, + Payload: payload, + } + for i := 0; i < numOfIter; i++ { + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } +} + +var ( + // test authdata + authdata = map[string]string{ + "test-key": "test-value", + "test-key2-bin": string([]byte{1, 2, 3}), + } +) + +type testPerRPCCredentials struct{} + +func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return authdata, nil +} + +func (cr testPerRPCCredentials) RequireTransportSecurity() bool { + return false +} + +func authHandle(ctx context.Context, info *tap.Info) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx, fmt.Errorf("didn't find metadata in context") + } + for k, vwant := range authdata { + vgot, ok := md[k] + if !ok { + return ctx, fmt.Errorf("didn't find authdata key %v in context", k) + } + if vgot[0] != vwant { + return ctx, fmt.Errorf("for key %v, got value %v, want %v", k, vgot, vwant) + } + } + return ctx, nil +} + +func 
TestPerRPCCredentialsViaDialOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaDialOptions(t, e) + } +} + +func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { + te := newTest(t, e) + te.tapHandle = authHandle + te.perRPCCreds = testPerRPCCredentials{} + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestPerRPCCredentialsViaCallOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaCallOptions(t, e) + } +} + +func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { + te := newTest(t, e) + te.tapHandle = authHandle + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testPerRPCCredentialsViaDialOptionsAndCallOptions(t, e) + } +} + +func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { + te := newTest(t, e) + te.perRPCCreds = testPerRPCCredentials{} + // When credentials are provided via both dial options and call options, + // we apply both sets. + te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx, fmt.Errorf("couldn't find metadata in context") + } + for k, vwant := range authdata { + vgot, ok := md[k] + if !ok { + return ctx, fmt.Errorf("couldn't find metadata for key %v", k) + } + if len(vgot) != 2 { + return ctx, fmt.Errorf("len of value for key %v was %v, want 2", k, len(vgot)) + } + if vgot[0] != vwant || vgot[1] != vwant { + return ctx, fmt.Errorf("value for %v was %v, want [%v, %v]", k, vgot, vwant, vwant) + } + } + return ctx, nil + } + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testpb.NewTestServiceClient(cc) + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + t.Fatalf("Test failed. Reason: %v", err) + } +} + +func TestWaitForReadyConnection(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testWaitForReadyConnection(t, e) + } + +} + +func testWaitForReadyConnection(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() // Non-blocking dial. + tc := testpb.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + state := cc.GetState() + // Wait for connection to be Ready. + for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if state != connectivity.Ready { + t.Fatalf("Want connection state to be Ready, got %v", state) + } + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() + // Make a fail-fast RPC. 
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err) + } +} + +type errCodec struct { + noError bool +} + +func (c *errCodec) Marshal(v interface{}) ([]byte, error) { + if c.noError { + return []byte{}, nil + } + return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12") +} + +func (c *errCodec) Unmarshal(data []byte, v interface{}) error { + return nil +} + +func (c *errCodec) String() string { + return "Fermat's near-miss." +} + +func TestEncodeDoesntPanic(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testEncodeDoesntPanic(t, e) + } +} + +func testEncodeDoesntPanic(t *testing.T, e env) { + te := newTest(t, e) + erc := &errCodec{} + te.customCodec = erc + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + te.customCodec = nil + tc := testpb.NewTestServiceClient(te.clientConn()) + // Failure case, should not panic. + tc.EmptyCall(context.Background(), &testpb.Empty{}) + erc.noError = true + // Passing case. + if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall(_, _) = _, %v, want _, ", err) + } +} + +func TestSvrWriteStatusEarlyWrite(t *testing.T) { + defer leakcheck.Check(t) + for _, e := range listTestEnv() { + testSvrWriteStatusEarlyWrite(t, e) + } +} + +func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { + te := newTest(t, e) + const smallSize = 1024 + const largeSize = 2048 + const extraLargeSize = 4096 + te.maxServerReceiveMsgSize = newInt(largeSize) + te.maxServerSendMsgSize = newInt(largeSize) + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testpb.NewTestServiceClient(te.clientConn()) + respParam := []*testpb.ResponseParameters{ + { + Size: proto.Int32(int32(smallSize)), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), + ResponseParameters: respParam, + Payload: extraLargePayload, + } + // Test recv case: server receives a message larger than maxServerReceiveMsgSize. + stream, err := tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err = stream.Send(sreq); err != nil { + t.Fatalf("%v.Send() = _, %v, want ", stream, err) + } + if _, err = stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + // Test send case: server sends a message larger than maxServerSendMsgSize. 
+ sreq.Payload = smallPayload + respParam[0].Size = proto.Int32(int32(extraLargeSize)) + + stream, err = tc.FullDuplexCall(te.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err = stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err = stream.Recv(); err == nil || grpc.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } +} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go index e584c4d6d6b0..bb6efbe7d1ec 100644 --- a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_testing/test.proto /* Package grpc_testing is a generated protocol buffer package. It is generated from these files: - test.proto + grpc_testing/test.proto It has these top-level messages: Empty @@ -742,47 +741,48 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "test.proto", + Metadata: "grpc_testing/test.proto", } -func init() { proto.RegisterFile("test.proto", fileDescriptor0) } +func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 567 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x54, 0x51, 0x6f, 0xd2, 0x50, - 0x14, 0xb6, 0x03, 0x64, 0x1c, 0x58, 0x43, 0x0e, 0x59, 0x64, 0x9d, 0x89, 0x4b, 0x7d, 0xb0, 0x9a, - 0x88, 0x86, 0x44, 0x1f, 0x35, 0x73, 0x63, 0x71, 0x09, 0x03, 0x6c, 0xe1, 0x99, 0x5c, 0xe1, 0x0e, - 0x9b, 0x94, 0xb6, 0xb6, 0xb7, 0x46, 0x7c, 0xf0, 0x8f, 0xf9, 0x67, 0xfc, 0x11, 0xfe, 0x00, 0xef, - 0xbd, 0x6d, 0xa1, 0x40, 0x17, 0x99, 0xc6, 0xbd, 0xb5, 0xdf, 0xf9, 0xce, 0x77, 0xbe, 0xef, 0x9e, - 0xdb, 0x02, 0x30, 0x1a, 0xb2, 0x96, 0x1f, 0x78, 0xcc, 0xc3, 0xda, 0x2c, 0xf0, 0x27, 0x2d, 0x01, - 0xd8, 0xee, 0x4c, 0x2f, 0x43, 0xa9, 0x33, 0xf7, 0xd9, 0x42, 0xef, 0x42, 0x79, 0x40, 0x16, 0x8e, - 0x47, 0xa6, 0xf8, 0x1c, 0x8a, 0x6c, 0xe1, 0xd3, 0xa6, 0x72, 0xa2, 0x18, 0x6a, 0xfb, 0xa8, 0x95, - 0x6d, 0x68, 0x25, 0xa4, 0x21, 0x27, 0x98, 0x92, 0x86, 0x08, 0xc5, 0x8f, 0xde, 0x74, 0xd1, 0xdc, - 0xe3, 0xf4, 0x9a, 0x29, 0x9f, 0xf5, 0x5f, 0x0a, 0x1c, 0x58, 0xf6, 0xdc, 0x77, 0xa8, 0x49, 0x3f, - 0x47, 0xbc, 0x15, 0xdf, 0xc0, 0x41, 0x40, 0x43, 0xdf, 0x73, 0x43, 0x3a, 0xde, 0x4d, 0xbd, 0x96, - 0xf2, 0xc5, 0x1b, 0x3e, 0xce, 0xf4, 0x87, 0xf6, 0x37, 0x2a, 0xc7, 0x95, 0x56, 0x24, 0x8b, 0x63, - 0xf8, 0x02, 0xca, 0x7e, 0xac, 0xd0, 0x2c, 0xf0, 0x72, 0xb5, 0x7d, 0x98, 0x2b, 0x6f, 0xa6, 0x2c, - 0xa1, 0x7a, 0x6d, 0x3b, 0xce, 0x38, 0x0a, 0x69, 0xe0, 0x92, 0x39, 0x6d, 0x16, 0x79, 0xdb, 0xbe, - 0x59, 0x13, 0xe0, 0x28, 0xc1, 0xd0, 0x80, 0xba, 0x24, 0x79, 0x24, 0x62, 0x9f, 0xc6, 0xe1, 0xc4, - 0xe3, 0xee, 0x4b, 0x92, 0xa7, 0x0a, 0xbc, 0x2f, 0x60, 0x4b, 0xa0, 0xfa, 0x77, 0x50, 0xd3, 0xd4, - 0xb1, 0xab, 0xac, 0x23, 0x65, 0x27, 0x47, 0x1a, 0xec, 0x2f, 0xcd, 0x88, 0x88, 0x15, 0x73, 0xf9, - 0x8e, 0x8f, 0xa0, 0x9a, 0xf5, 0x50, 0x90, 0x65, 0xf0, 0x56, 0xf3, 0xbb, 0x70, 0x64, 0xb1, 0x80, - 0x92, 0x39, 0x97, 0xbe, 0x74, 0xfd, 0x88, 0x9d, 0x11, 0xc7, 0x49, 0x37, 0x70, 0x5b, 0x2b, 0xfa, - 0x10, 0xb4, 0x3c, 0xb5, 0x24, 0xd9, 0x6b, 0x78, 0x40, 0x66, 0xb3, 0x80, 0xce, 0x08, 0xa3, 0xd3, - 0x71, 0xd2, 0x13, 0xaf, 0x46, 0x91, 
0xab, 0x39, 0x5c, 0x95, 0x13, 0x69, 0xb1, 0x23, 0xfd, 0x12, - 0x30, 0xd5, 0x18, 0x90, 0x80, 0xc7, 0x62, 0x34, 0x08, 0xc5, 0x25, 0xca, 0xb4, 0xca, 0x67, 0x11, - 0xd7, 0x76, 0x79, 0xf5, 0x0b, 0x11, 0x0b, 0x4a, 0x16, 0x0e, 0x29, 0x34, 0x0a, 0xf5, 0x9f, 0x4a, - 0xc6, 0x61, 0x3f, 0x62, 0x1b, 0x81, 0xff, 0xf5, 0xca, 0x7d, 0x80, 0xc6, 0xb2, 0xdf, 0x5f, 0x5a, - 0xe5, 0x3e, 0x0a, 0xfc, 0xf0, 0x4e, 0xd6, 0x55, 0xb6, 0x23, 0x99, 0x18, 0x6c, 0xc7, 0xbc, 0xed, - 0x05, 0xd5, 0x7b, 0x70, 0x9c, 0x9b, 0xf0, 0x2f, 0xaf, 0xd7, 0xb3, 0xb7, 0x50, 0xcd, 0x04, 0xc6, - 0x3a, 0xd4, 0xce, 0xfa, 0x57, 0x03, 0xb3, 0x63, 0x59, 0xa7, 0xef, 0xba, 0x9d, 0xfa, 0x3d, 0xbe, - 0x08, 0x75, 0xd4, 0x5b, 0xc3, 0x14, 0x04, 0xb8, 0x6f, 0x9e, 0xf6, 0xce, 0xfb, 0x57, 0xf5, 0xbd, - 0xf6, 0x8f, 0x22, 0x54, 0x87, 0x5c, 0xdd, 0xe2, 0x4b, 0xb0, 0x27, 0x14, 0x5f, 0x41, 0x45, 0xfe, - 0x40, 0x84, 0x2d, 0x6c, 0xac, 0x4f, 0x97, 0x05, 0x2d, 0x0f, 0xc4, 0x0b, 0xa8, 0x8c, 0x5c, 0x12, - 0xc4, 0x6d, 0xc7, 0xeb, 0x8c, 0xb5, 0x1f, 0x87, 0xf6, 0x30, 0xbf, 0x98, 0x1c, 0x80, 0x03, 0x8d, - 0x9c, 0xf3, 0x41, 0x63, 0xa3, 0xe9, 0xc6, 0x4b, 0xa2, 0x3d, 0xdd, 0x81, 0x19, 0xcf, 0x7a, 0xa9, - 0xa0, 0x0d, 0xb8, 0xfd, 0x45, 0xe0, 0x93, 0x1b, 0x24, 0x36, 0xbf, 0x40, 0xcd, 0xf8, 0x33, 0x31, - 0x1e, 0x65, 0x88, 0x51, 0xea, 0x45, 0xe4, 0x38, 0xe7, 0x11, 0x4f, 0xfb, 0xf5, 0xbf, 0x65, 0x32, - 0x14, 0x99, 0x4a, 0x7d, 0x4f, 0x9c, 0xeb, 0x3b, 0x18, 0xf5, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x4c, - 0x41, 0xfe, 0xb6, 0x89, 0x06, 0x00, 0x00, + // 582 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, + 0x10, 0xfd, 0xb6, 0x49, 0xbe, 0x34, 0x93, 0xd4, 0x8a, 0x36, 0xaa, 0xea, 0xba, 0x48, 0x58, 0xe6, + 0x02, 0x83, 0x44, 0x8a, 0x22, 0xc1, 0x25, 0xa8, 0xb4, 0xa9, 0xa8, 0x94, 0x26, 0xc1, 0x4e, 0xae, + 0xa3, 0x25, 0xd9, 0x1a, 0x4b, 0x8e, 0xbd, 0xac, 0xd7, 0x15, 0xe9, 0x05, 0x2f, 0xc6, 0xcb, 0xf0, + 0x10, 0x3c, 0x00, 0x5a, 0xff, 0x24, 0x4e, 0xe2, 0x8a, 0x14, 0x04, 0x57, 0xb6, 0x67, 0xce, 0x9c, + 0x39, 0xc7, 0x33, 0xbb, 0x70, 0xe4, 0x70, 0x36, 0x9d, 0x08, 0x1a, 0x0a, 0xd7, 0x77, 0x4e, 0xe5, + 0xb3, 0xcd, 0x78, 0x20, 0x02, 0xdc, 0x90, 0x89, 0x76, 0x9a, 0x30, 0xaa, 0x50, 0xe9, 0xce, 0x99, + 0x58, 0x18, 0x3d, 0xa8, 0x0e, 0xc9, 0xc2, 0x0b, 0xc8, 0x0c, 0xbf, 0x80, 0xb2, 0x58, 0x30, 0xaa, + 0x22, 0x1d, 0x99, 0x4a, 0xe7, 0xb8, 0x9d, 0x2f, 0x68, 0xa7, 0xa0, 0xd1, 0x82, 0x51, 0x2b, 0x86, + 0x61, 0x0c, 0xe5, 0x8f, 0xc1, 0x6c, 0xa1, 0xee, 0xe9, 0xc8, 0x6c, 0x58, 0xf1, 0xbb, 0xf1, 0x03, + 0xc1, 0x81, 0xed, 0xce, 0x99, 0x47, 0x2d, 0xfa, 0x39, 0xa2, 0xa1, 0xc0, 0x6f, 0xe0, 0x80, 0xd3, + 0x90, 0x05, 0x7e, 0x48, 0x27, 0xbb, 0xb1, 0x37, 0x32, 0xbc, 0xfc, 0xc2, 0x4f, 0x72, 0xf5, 0xa1, + 0x7b, 0x47, 0xe3, 0x76, 0x95, 0x15, 0xc8, 0x76, 0xef, 0x28, 0x3e, 0x85, 0x2a, 0x4b, 0x18, 0xd4, + 0x92, 0x8e, 0xcc, 0x7a, 0xe7, 0xb0, 0x90, 0xde, 0xca, 0x50, 0x92, 0xf5, 0xc6, 0xf5, 0xbc, 0x49, + 0x14, 0x52, 0xee, 0x93, 0x39, 0x55, 0xcb, 0x3a, 0x32, 0xf7, 0xad, 0x86, 0x0c, 0x8e, 0xd3, 0x18, + 0x36, 0xa1, 0x19, 0x83, 0x02, 0x12, 0x89, 0x4f, 0x93, 0x70, 0x1a, 0x30, 0xaa, 0x56, 0x62, 0x9c, + 0x22, 0xe3, 0x03, 0x19, 0xb6, 0x65, 0xd4, 0xf8, 0x0a, 0x4a, 0xe6, 0x3a, 0x51, 0x95, 0x57, 0x84, + 0x76, 0x52, 0xa4, 0xc1, 0xfe, 0x52, 0x8c, 0xb4, 0x58, 0xb3, 0x96, 0xdf, 0xf8, 0x31, 0xd4, 0xf3, + 0x1a, 0x4a, 0x71, 0x1a, 0x82, 0x55, 0xff, 0x1e, 0x1c, 0xdb, 0x82, 0x53, 0x32, 0x77, 0x7d, 0xe7, + 0xca, 0x67, 0x91, 0x38, 0x27, 0x9e, 0x97, 0x4d, 0xe0, 0xa1, 0x52, 0x8c, 0x11, 0x68, 0x45, 0x6c, + 0xa9, 0xb3, 0xd7, 0x70, 0x44, 0x1c, 0x87, 0x53, 0x87, 0x08, 0x3a, 
0x9b, 0xa4, 0x35, 0xc9, 0x68, + 0x50, 0x3c, 0x9a, 0xc3, 0x55, 0x3a, 0xa5, 0x96, 0x33, 0x32, 0xae, 0x00, 0x67, 0x1c, 0x43, 0xc2, + 0xc9, 0x9c, 0x0a, 0xca, 0x43, 0xb9, 0x44, 0xb9, 0xd2, 0xf8, 0x5d, 0xda, 0x75, 0x7d, 0x41, 0xf9, + 0x2d, 0x91, 0x03, 0x4a, 0x07, 0x0e, 0x59, 0x68, 0x1c, 0x1a, 0xdf, 0x51, 0x4e, 0xe1, 0x20, 0x12, + 0x1b, 0x86, 0xff, 0x74, 0xe5, 0x3e, 0x40, 0x6b, 0x59, 0xcf, 0x96, 0x52, 0xd5, 0x3d, 0xbd, 0x64, + 0xd6, 0x3b, 0xfa, 0x3a, 0xcb, 0xb6, 0x25, 0x0b, 0xf3, 0x6d, 0x9b, 0x0f, 0x5d, 0x50, 0xa3, 0x0f, + 0x27, 0x85, 0x0e, 0x7f, 0x73, 0xbd, 0x9e, 0xbf, 0x85, 0x7a, 0xce, 0x30, 0x6e, 0x42, 0xe3, 0x7c, + 0x70, 0x3d, 0xb4, 0xba, 0xb6, 0x7d, 0xf6, 0xae, 0xd7, 0x6d, 0xfe, 0x87, 0x31, 0x28, 0xe3, 0xfe, + 0x5a, 0x0c, 0x61, 0x80, 0xff, 0xad, 0xb3, 0xfe, 0xc5, 0xe0, 0xba, 0xb9, 0xd7, 0xf9, 0x56, 0x86, + 0xfa, 0x88, 0x86, 0xc2, 0xa6, 0xfc, 0xd6, 0x9d, 0x52, 0xfc, 0x0a, 0x6a, 0xf1, 0x05, 0x22, 0x65, + 0xe1, 0xd6, 0x7a, 0xf7, 0x38, 0xa1, 0x15, 0x05, 0xf1, 0x25, 0xd4, 0xc6, 0x3e, 0xe1, 0x49, 0xd9, + 0xc9, 0x3a, 0x62, 0xed, 0xe2, 0xd0, 0x1e, 0x15, 0x27, 0xd3, 0x1f, 0xe0, 0x41, 0xab, 0xe0, 0xff, + 0x60, 0x73, 0xa3, 0xe8, 0xde, 0x25, 0xd1, 0x9e, 0xed, 0x80, 0x4c, 0x7a, 0xbd, 0x44, 0xd8, 0x05, + 0xbc, 0x7d, 0x22, 0xf0, 0xd3, 0x7b, 0x28, 0x36, 0x4f, 0xa0, 0x66, 0xfe, 0x1a, 0x98, 0xb4, 0x32, + 0x65, 0x2b, 0xe5, 0x32, 0xf2, 0xbc, 0x8b, 0x88, 0x79, 0xf4, 0xcb, 0x5f, 0xf3, 0x64, 0xa2, 0xd8, + 0x95, 0xf2, 0x9e, 0x78, 0x37, 0xff, 0xa0, 0xd5, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa6, + 0x30, 0x01, 0x96, 0x06, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto index b5bfe0537898..fbd22dec973e 100644 --- a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto +++ b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto @@ -1,3 +1,17 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto2"; diff --git a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go new file mode 100644 index 000000000000..76f9fc541762 --- /dev/null +++ b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package leakcheck contains functions to check leaked goroutines. +// +// Call "defer leakcheck.Check(t)" at the beginning of tests. +package leakcheck + +import ( + "runtime" + "sort" + "strings" + "time" +) + +var goroutinesToIgnore = []string{ + "testing.Main(", + "testing.tRunner(", + "testing.(*M).", + "runtime.goexit", + "created by runtime.gc", + "created by runtime/trace.Start", + "interestingGoroutines", + "runtime.MHeap_Scavenger", + "signal.signal_recv", + "sigterm.handler", + "runtime_mcall", + "(*loggingT).flushDaemon", + "goroutine in C code", +} + +// RegisterIgnoreGoroutine appends s into the ignore goroutine list. The +// goroutines whose stack trace contains s will not be identified as leaked +// goroutines. Not thread-safe, only call this function in init(). +func RegisterIgnoreGoroutine(s string) { + goroutinesToIgnore = append(goroutinesToIgnore, s) +} + +func ignore(g string) bool { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + return true + } + stack := strings.TrimSpace(sl[1]) + if strings.HasPrefix(stack, "testing.RunTests") { + return true + } + + if stack == "" { + return true + } + + for _, s := range goroutinesToIgnore { + if strings.Contains(stack, s) { + return true + } + } + + return false +} + +// interestingGoroutines returns all goroutines we care about for the purpose of +// leak checking. It excludes testing or runtime ones. +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + if !ignore(g) { + gs = append(gs, g) + } + } + sort.Strings(gs) + return +} + +// Errorfer is the interface that wraps the Errorf method. It's a subset of +// testing.TB to make it easy to use Check. +type Errorfer interface { + Errorf(format string, args ...interface{}) +} + +func check(efer Errorfer, timeout time.Duration) { + // Loop, waiting for goroutines to shut down. + // Wait up to timeout, but finish as quickly as possible. + deadline := time.Now().Add(timeout) + var leaked []string + for time.Now().Before(deadline) { + if leaked = interestingGoroutines(); len(leaked) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + for _, g := range leaked { + efer.Errorf("Leaked goroutine: %v", g) + } +} + +// Check looks at the currently-running goroutines and checks if there are any +// interestring (created by gRPC) goroutines leaked. It waits up to 10 seconds +// in the error cases. +func Check(efer Errorfer) { + check(efer, 10*time.Second) +} diff --git a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go new file mode 100644 index 000000000000..50927e9db4cd --- /dev/null +++ b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck_test.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package leakcheck + +import ( + "fmt" + "strings" + "testing" + "time" +) + +type testErrorfer struct { + errorCount int + errors []string +} + +func (e *testErrorfer) Errorf(format string, args ...interface{}) { + e.errors = append(e.errors, fmt.Sprintf(format, args...)) + e.errorCount++ +} + +func TestCheck(t *testing.T) { + const leakCount = 3 + for i := 0; i < leakCount; i++ { + go func() { time.Sleep(2 * time.Second) }() + } + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} + +func ignoredTestingLeak(d time.Duration) { + time.Sleep(d) +} + +func TestCheckRegisterIgnore(t *testing.T) { + RegisterIgnoreGoroutine("ignoredTestingLeak") + const leakCount = 3 + for i := 0; i < leakCount; i++ { + go func() { time.Sleep(2 * time.Second) }() + } + go func() { ignoredTestingLeak(3 * time.Second) }() + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} diff --git a/vendor/google.golang.org/grpc/test/race.go b/vendor/google.golang.org/grpc/test/race.go new file mode 100644 index 000000000000..acfa0dfae37c --- /dev/null +++ b/vendor/google.golang.org/grpc/test/race.go @@ -0,0 +1,24 @@ +// +build race + +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +func init() { + raceMode = true +} diff --git a/vendor/google.golang.org/grpc/test/race_test.go b/vendor/google.golang.org/grpc/test/race_test.go deleted file mode 100644 index b3a7056c66ba..000000000000 --- a/vendor/google.golang.org/grpc/test/race_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build race - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc_test - -func init() { - raceMode = true -} diff --git a/vendor/google.golang.org/grpc/test/servertester_test.go b/vendor/google.golang.org/grpc/test/servertester.go similarity index 80% rename from vendor/google.golang.org/grpc/test/servertester_test.go rename to vendor/google.golang.org/grpc/test/servertester.go index 0225a8576de2..daeca0622bf7 100644 --- a/vendor/google.golang.org/grpc/test/servertester_test.go +++ b/vendor/google.golang.org/grpc/test/servertester.go @@ -1,35 +1,20 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. + * Copyright 2016 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ -package grpc_test +package test import ( "bytes" @@ -287,3 +272,9 @@ func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { st.t.Fatalf("Error writing RST_STREAM: %v", err) } } + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, padding []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, padding); err != nil { + st.t.Fatalf("Error writing DATA with padding: %v", err) + } +} diff --git a/vendor/google.golang.org/grpc/test/testdata/ca.pem b/vendor/google.golang.org/grpc/test/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/test/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/test/testdata/server1.key b/vendor/google.golang.org/grpc/test/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/test/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/test/testdata/server1.pem b/vendor/google.golang.org/grpc/test/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/test/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco 
-LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/testdata.go b/vendor/google.golang.org/grpc/testdata/testdata.go new file mode 100644 index 000000000000..5609b19b3a50 --- /dev/null +++ b/vendor/google.golang.org/grpc/testdata/testdata.go @@ -0,0 +1,63 @@ +/* + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testdata + +import ( + "log" + "os" + "path/filepath" +) + +// Path returns the absolute path the given relative file or directory path, +// relative to the google.golang.org/grpc/testdata directory in the user's GOPATH. +// If rel is already absolute, it is returned unmodified. +func Path(rel string) string { + if filepath.IsAbs(rel) { + return rel + } + + v, err := goPackagePath("google.golang.org/grpc/testdata") + if err != nil { + log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) + } + + return filepath.Join(v, rel) +} + +func goPackagePath(pkg string) (path string, err error) { + gp := os.Getenv("GOPATH") + if gp == "" { + return path, os.ErrNotExist + } + + for _, p := range filepath.SplitList(gp) { + dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) + fi, err := os.Stat(dir) + if os.IsNotExist(err) { + continue + } + if err != nil { + return "", err + } + if !fi.IsDir() { + continue + } + return dir, nil + } + return path, os.ErrNotExist +} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index f6747e1dfa4b..c1c96dedcb75 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -1,33 +1,18 @@ /* * - * Copyright 2015, Google Inc. - * All rights reserved. + * Copyright 2015 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -46,7 +31,7 @@ import ( // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. // This should only be set before any RPCs are sent or received by this program. -var EnableTracing = true +var EnableTracing bool // methodFamily returns the trace family for the given method. // It turns "/pkg.Service/GetFoo" into "pkg.Service". @@ -91,6 +76,15 @@ func (f *firstLine) String() string { return line.String() } +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + // payload represents an RPC request or response payload. type payload struct { sent bool // whether this is an outgoing payload @@ -100,9 +94,9 @@ type payload struct { func (p payload) String() string { if p.sent { - return fmt.Sprintf("sent: %v", p.msg) + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) } - return fmt.Sprintf("recv: %v", p.msg) + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go new file mode 100644 index 000000000000..8dd2ed42792f --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. 
+ alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +var ( + // Adding arbitrary data to ping so that its ack can be + // identified. + // Easter-egg: what does the ping message say? + bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} +) + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 8d29aee53d49..dd1a8d42e7e2 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -37,17 +22,18 @@ import ( "fmt" "math" "sync" + "sync/atomic" "time" "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. 
- initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection + initialWindowSize = defaultWindowSize // for an RPC infinity = time.Duration(math.MaxInt64) defaultClientKeepaliveTime = infinity defaultClientKeepaliveTimeout = time.Duration(20 * time.Second) @@ -58,11 +44,43 @@ const ( defaultServerKeepaliveTime = time.Duration(2 * time.Hour) defaultServerKeepaliveTimeout = time.Duration(20 * time.Second) defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute) + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultLocalSendQuota sets is default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultLocalSendQuota = 64 * 1024 ) // The following defines various control items which could flow through // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. + +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool +} + +func (*headerFrame) item() {} + +type continuationFrame struct { + streamID uint32 + endHeaders bool + headerBlockFragment []byte +} + +type dataFrame struct { + streamID uint32 + endStream bool + d []byte + f func() +} + +func (*dataFrame) item() {} + +func (*continuationFrame) item() {} + type windowUpdate struct { streamID uint32 increment uint32 @@ -87,6 +105,8 @@ func (*resetStream) item() {} type goAway struct { code http2.ErrCode debugData []byte + headsUp bool + closeConn bool } func (*goAway) item() {} @@ -108,8 +128,9 @@ func (*ping) item() {} type quotaPool struct { c chan int - mu sync.Mutex - quota int + mu sync.Mutex + version uint32 + quota int } // newQuotaPool creates a quotaPool which has quota q available to consume. @@ -130,6 +151,10 @@ func newQuotaPool(q int) *quotaPool { func (qb *quotaPool) add(v int) { qb.mu.Lock() defer qb.mu.Unlock() + qb.lockedAdd(v) +} + +func (qb *quotaPool) lockedAdd(v int) { select { case n := <-qb.c: qb.quota += n @@ -150,6 +175,35 @@ func (qb *quotaPool) add(v int) { } } +func (qb *quotaPool) addAndUpdate(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.lockedAdd(v) + // Update the version only after having added to the quota + // so that if acquireWithVesrion sees the new vesrion it is + // guaranteed to have seen the updated quota. + // Also, still keep this inside of the lock, so that when + // compareAndExecute is processing, this function doesn't + // get executed partially (quota gets updated but the version + // doesn't). + atomic.AddUint32(&(qb.version), 1) +} + +func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) { + return qb.c, atomic.LoadUint32(&(qb.version)) +} + +func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool { + qb.mu.Lock() + defer qb.mu.Unlock() + if version == atomic.LoadUint32(&(qb.version)) { + success() + return true + } + failure() + return false +} + // acquire returns the channel on which available quota amounts are sent. func (qb *quotaPool) acquire() <-chan int { return qb.c @@ -157,16 +211,59 @@ func (qb *quotaPool) acquire() <-chan int { // inFlow deals with inbound flow control type inFlow struct { + mu sync.Mutex // The inbound flow control limit for pending data. limit uint32 - - mu sync.Mutex // pendingData is the overall data which have been received but not been // consumed by applications. 
pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + defer f.mu.Unlock() + d := n - f.limit + f.limit = n + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 } // onData is invoked when some data frame is received. It updates pendingData. @@ -174,7 +271,7 @@ func (f *inFlow) onData(n uint32) error { f.mu.Lock() defer f.mu.Unlock() f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { + if f.pendingData+f.pendingUpdate > f.limit+f.delta { return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } return nil @@ -189,6 +286,13 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate @@ -198,10 +302,10 @@ func (f *inFlow) onRead(n uint32) uint32 { return 0 } -func (f *inFlow) resetPendingData() uint32 { +func (f *inFlow) resetPendingUpdate() uint32 { f.mu.Lock() defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 + n := f.pendingUpdate + f.pendingUpdate = 0 return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go deleted file mode 100644 index ee1c46bad575..000000000000 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go deleted file mode 100644 index 356f13ff1972..000000000000 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 24f306babbb1..7e0fdb35938b 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -1,32 +1,18 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2016 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -47,6 +33,7 @@ import ( "sync" "time" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc/codes" @@ -102,15 +89,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr continue } for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } v, err := decodeMetadataHeader(k, v) if err != nil { return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) @@ -144,6 +122,10 @@ type serverHandlerTransport struct { // ServeHTTP (HandleStreams) goroutine. The channel is closed // when WriteStatus is called. writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex } func (ht *serverHandlerTransport) Close() error { @@ -179,15 +161,24 @@ func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. select { - case ht.writes <- fn: - return nil case <-ht.closedCh: return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } } } func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + err := ht.do(func() { ht.writeCommonHeaders(s) @@ -202,7 +193,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } - // TODO: Support Grpc-Status-Details-Bin + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. 
+ panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } if md := s.Trailer(); len(md) > 0 { for k, vv := range md { @@ -218,7 +217,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } }) - close(ht.writes) + + if err == nil { // transport has not been closed + ht.Close() + close(ht.writes) + } return err } @@ -241,16 +244,17 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") - // TODO: Support Grpc-Status-Details-Bin + h.Add("Trailer", "Grpc-Status-Details-Bin") if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) } } -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { return ht.do(func() { ht.writeCommonHeaders(s) + ht.rw.Write(hdr) ht.rw.Write(data) if !opts.Delay { ht.rw.(http.Flusher).Flush() @@ -309,13 +313,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace req := ht.req s := &Stream{ - id: 0, // irrelevant - windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), } pr := &peer.Peer{ Addr: ht.RemoteAddr(), @@ -326,7 +330,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ctx = metadata.NewIncomingContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } // readerDone is closed when the Body.Read-ing goroutine exits. readerDone := make(chan struct{}) @@ -338,11 +345,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(&recvMsg{data: buf[:n:n]}) + s.buf.put(recvMsg{data: buf[:n:n]}) buf = buf[n:] } if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } if len(buf) == 0 { diff --git a/vendor/google.golang.org/grpc/transport/handler_server_test.go b/vendor/google.golang.org/grpc/transport/handler_server_test.go index 84378485ec40..8505e1a7fd38 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server_test.go +++ b/vendor/google.golang.org/grpc/transport/handler_server_test.go @@ -1,32 +1,18 @@ /* - * Copyright 2016, Google Inc. - * All rights reserved. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Copyright 2016 gRPC authors. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -40,10 +26,14 @@ import ( "net/http/httptest" "net/url" "reflect" + "sync" "testing" "time" + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/ptypes/duration" "golang.org/x/net/context" + epb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -210,9 +200,10 @@ func TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { check: func(ht *serverHandlerTransport, tt *testCase) error { want := metadata.MD{ "meta-bar": {"bar-val1", "bar-val2"}, - "user-agent": {"x/y"}, + "user-agent": {"x/y a/b"}, "meta-foo": {"foo-val"}, } + if !reflect.DeepEqual(ht.headerMD, want) { return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want) } @@ -308,7 +299,7 @@ func TestHandlerTransport_HandleStreams(t *testing.T) { wantHeader := http.Header{ "Date": nil, "Content-Type": {"application/grpc"}, - "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Grpc-Status": {"0"}, } if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { @@ -328,6 +319,7 @@ func TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) { func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) { st := newHandleStreamTest(t) + handleStream := func(s *Stream) { st.ht.WriteStatus(s, status.New(statusCode, msg)) } @@ -338,10 +330,11 @@ func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) wantHeader := http.Header{ "Date": nil, "Content-Type": {"application/grpc"}, - "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, "Grpc-Message": {encodeGrpcMessage(msg)}, } + if !reflect.DeepEqual(st.rw.HeaderMap, wantHeader) { t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", st.rw.HeaderMap, wantHeader) } @@ -389,7 +382,7 @@ func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { 
wantHeader := http.Header{ "Date": nil, "Content-Type": {"application/grpc"}, - "Trailer": {"Grpc-Status", "Grpc-Message"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Grpc-Status": {"4"}, "Grpc-Message": {encodeGrpcMessage("too slow")}, } @@ -397,3 +390,92 @@ func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { t.Errorf("Header+Trailer Map mismatch.\n got: %#v\nwant: %#v", rw.HeaderMap, wantHeader) } } + +// TestHandlerTransport_HandleStreams_MultiWriteStatus ensures that +// concurrent "WriteStatus"s do not panic writing to closed "writes" channel. +func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) { + testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { + if want := "/service/foo.bar"; s.method != want { + t.Errorf("stream method = %q; want %q", s.method, want) + } + st.bodyw.Close() // no body + + var wg sync.WaitGroup + wg.Add(5) + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + st.ht.WriteStatus(s, status.New(codes.OK, "")) + }() + } + wg.Wait() + }) +} + +// TestHandlerTransport_HandleStreams_WriteStatusWrite ensures that "Write" +// following "WriteStatus" does not panic writing to closed "writes" channel. +func TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) { + testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { + if want := "/service/foo.bar"; s.method != want { + t.Errorf("stream method = %q; want %q", s.method, want) + } + st.bodyw.Close() // no body + + st.ht.WriteStatus(s, status.New(codes.OK, "")) + st.ht.Write(s, []byte("hdr"), []byte("data"), &Options{}) + }) +} + +func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handleStreamTest, s *Stream)) { + st := newHandleStreamTest(t) + st.ht.HandleStreams( + func(s *Stream) { go handleStream(st, s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) +} + +func TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { + errDetails := []proto.Message{ + &epb.RetryInfo{ + RetryDelay: &dpb.Duration{Seconds: 60}, + }, + &epb.ResourceInfo{ + ResourceType: "foo bar", + ResourceName: "service.foo.bar", + Owner: "User", + }, + } + + statusCode := codes.ResourceExhausted + msg := "you are being throttled" + st, err := status.New(statusCode, msg).WithDetails(errDetails...) + if err != nil { + t.Fatal(err) + } + + stBytes, err := proto.Marshal(st.Proto()) + if err != nil { + t.Fatal(err) + } + + hst := newHandleStreamTest(t) + handleStream := func(s *Stream) { + hst.ht.WriteStatus(s, st) + } + hst.ht.HandleStreams( + func(s *Stream) { go handleStream(s) }, + func(ctx context.Context, method string) context.Context { return ctx }, + ) + wantHeader := http.Header{ + "Date": nil, + "Content-Type": {"application/grpc"}, + "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, + "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, + "Grpc-Message": {encodeGrpcMessage(msg)}, + "Grpc-Status-Details-Bin": {encodeBinHeader(stBytes)}, + } + + if !reflect.DeepEqual(hst.rw.HeaderMap, wantHeader) { + t.Errorf("Header+Trailer mismatch.\n got: %#v\nwant: %#v", hst.rw.HeaderMap, wantHeader) + } +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 380fff665fbb..1abb62e6df4a 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. 
- * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -48,7 +33,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -59,6 +43,7 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { ctx context.Context + cancel context.CancelFunc target string // server name/addr userAgent string md interface{} @@ -68,17 +53,6 @@ type http2Client struct { authInfo credentials.AuthInfo // auth info about the connection nextID uint32 // the next stream ID to be used - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. 
goAway chan struct{} @@ -91,7 +65,7 @@ type http2Client struct { // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool @@ -101,6 +75,8 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string + isSecure bool + creds []credentials.PerRPCCredentials // Boolean to keep track of reading activity on transport. @@ -110,6 +86,11 @@ type http2Client struct { statsHandler stats.Handler + initialWindowSize int32 + + bdpEst *bdpEstimator + outQuotaVersion uint32 + mu sync.Mutex // guard the following variables state transportState // the state of underlying connection activeStreams map[uint32]*Stream @@ -117,8 +98,6 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 - // goAwayID records the Last-Stream-ID in the GoAway frame from the server. - goAwayID uint32 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -130,7 +109,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if fn != nil { return fn(ctx, addr) } - return dialContext(ctx, "tcp", addr) + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { @@ -164,14 +143,23 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) { +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { scheme := "http" - conn, err := dial(ctx, opts.Dialer, addr.Addr) + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + connectCancel() + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err) + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -179,16 +167,20 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( conn.Close() } }(conn) - var authInfo credentials.AuthInfo + var ( + isSecure bool + authInfo credentials.AuthInfo + ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn) + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn) if err != nil { // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. 
temp := isTemporary(err) - return nil, connectionErrorf(temp, err, "transport: %v", err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) } + isSecure = true } kp := opts.KeepaliveParams // Validate keepalive parameters. @@ -198,9 +190,24 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } t := &http2Client{ ctx: ctx, + cancel: cancel, target: addr.Addr, userAgent: opts.UserAgent, md: addr.Metadata, @@ -209,27 +216,36 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( localAddr: conn.LocalAddr(), authInfo: authInfo, // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: defaultMaxStreamsClient, - streamsQuota: newQuotaPool(defaultMaxStreamsClient), - streamSendQuota: defaultWindowSize, - kp: kp, - statsHandler: opts.StatsHandler, + nextID: 1, + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } } // Make sure awakenKeepalive can't be written upon. // keepalive routine will make it writable, if need be. 
@@ -252,65 +268,75 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } - if initialWindowSize != defaultWindowSize { - err = t.framer.writeSettings(true, http2.Setting{ + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), + Val: uint32(t.initialWindowSize), }) } else { - err = t.framer.writeSettings(true) + err = t.framer.fr.WriteSettings() } if err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) } // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { t.Close() - return nil, connectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } - go t.controller() + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() if t.kp.Time != infinity { go t.keepalive() } - t.writableChan <- 0 return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + localSendQuota: newQuotaPool(defaultLocalSendQuota), + headerChan: make(chan struct{}), } t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. 
s.ctx = ctx - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, } + return s } @@ -324,31 +350,51 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.authInfo != nil { pr.AuthInfo = t.authInfo } - userCtx := ctx ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. - if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + pos = len(callHdr.Method) } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + audience = "https://" + host + callHdr.Method[:pos] + } + for _, c := range t.creds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) + return nil, streamErrorf(codes.Internal, "transport: %v", err) } for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) authData[k] = v } } + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } t.mu.Lock() if t.activeStreams == nil { t.mu.Unlock() @@ -363,7 +409,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } t.mu.Unlock() - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -371,79 +417,49 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if sq > 1 { t.streamsQuota.add(sq - 1) } - if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok { - t.streamsQuota.add(1) - } - return nil, err - } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - t.streamsQuota.add(1) - // Need to make t writable again so that the rpc in flight can still proceed. 
- t.writableChan <- 0 - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - s.clientStatsCtx = userCtx - t.activeStreams[s.id] = s - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 { - select { - case t.awakenKeepalive <- struct{}{}: - t.framer.writePing(false, false, [8]byte{}) - default: - } - } - - t.mu.Unlock() - - // HPACK encodes various headers. Note that once WriteField(...) is - // called, the corresponding headers/continuation frame has to be sent - // because hpack.Encoder is stateful. - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.SendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. timeout := dl.Sub(time.Now()) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } - for k, v := range authData { - // Capital header names are illegal in HTTP/2. 
- k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - var ( - hasMD bool - endHeaders bool - ) if md, ok := metadata.FromOutgoingContext(ctx); ok { - hasMD = true for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -453,60 +469,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - first := true - bufLen := t.hBuf.Len() - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var flush bool - if endHeaders && (hasMD || callHdr.Flush) { - flush = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - err = t.framer.writeHeaders(flush, p) - first = false - } else { - // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) - } - if err != nil { - t.notifyError(err) - return nil, connectionErrorf(true, err, "transport: %v", err) + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.controlBuf.put(&ping{data: [8]byte{}}) + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. 
+ t.awakenKeepalive <- struct{}{} + default: } } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) + t.mu.Unlock() + + s.mu.Lock() s.bytesSent = true + s.mu.Unlock() if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, - WireLength: bufLen, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, } - t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader) + t.statsHandler.HandleRPC(s.ctx, outHeader) } - t.writableChan <- 0 return s, nil } @@ -518,6 +530,10 @@ func (t *http2Client) CloseStream(s *Stream, err error) { t.mu.Unlock() return } + if err != nil { + // notify in-flight streams, before the deletion + s.write(recvMsg{err: err}) + } delete(t.activeStreams, s.id) if t.state == draining && len(t.activeStreams) == 0 { // The transport is draining and s is the last live stream on t. @@ -547,11 +563,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) { s.mu.Lock() rstStream = s.rstStream rstError = s.rstError - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -577,12 +588,9 @@ func (t *http2Client) Close() (err error) { t.mu.Unlock() return } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } t.state = closing t.mu.Unlock() - close(t.shutdownChan) + t.cancel() err = t.conn.Close() t.mu.Lock() streams := t.activeStreams @@ -604,41 +612,18 @@ func (t *http2Client) Close() (err error) { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return + return err } +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. - t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - // Notify the streams which were initiated after the server sent GOAWAY. - select { - case <-t.goAway: - n := t.prevGoAwayID - if n == 0 && t.nextID > 1 { - n = t.nextID - 2 - } - m := t.goAwayID + 2 - if m == 2 { - m = 1 - } - for i := m; i <= n; i += 2 { - if s, ok := t.activeStreams[i]; ok { - close(s.goAway) - } - } - default: - } - if t.state == draining { + case closing, draining: t.mu.Unlock() return nil } @@ -653,21 +638,38 @@ func (t *http2Client) GracefulClose() error { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. 
-func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + if hdr == nil && data == nil && opts.Last { + // stream.CloseSend uses this to send an empty frame with endStream=True + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) + return nil + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for idx, r := range [][]byte{hdr, data} { + for len(r) > 0 { size := http2MaxFrameLen // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) if err != nil { return err } // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) if err != nil { return err } @@ -677,69 +679,51 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { if tq < size { size = tq } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) + if size > len(r) { + size = len(r) } + p := r[:size] + ps := len(p) if ps < tq { // Overbooked transport quota. Return it back. t.sendQuotaPool.add(tq - ps) } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - // Return the connection quota back. - t.sendQuotaPool.add(len(p)) + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) + s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. + var endStream bool + // See if this is the last frame to be written. + if opts.Last { + if len(r)-size == 0 { // No more data in r after this iteration. + if idx == 0 { // We're writing data header. + if len(data) == 0 { // There's no data to follow. + endStream = true + } + } else { // We're writing data. 
+ endStream = true + } + } } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(len(p)) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) + if ps < sq { + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. - if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break } } if !opts.Last { @@ -760,6 +744,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { return s, ok } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback connection's window update along. + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -769,41 +771,76 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) +} + func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(connectionErrorf(true, err, "%v", err)) - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could've been an empty data frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -859,10 +896,10 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } - s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode)) + s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -881,7 +918,11 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) { } func (t *http2Client) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -890,36 +931,56 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state != reachable && t.state != draining { + t.mu.Unlock() + return + } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - grpclog.Printf("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") } - t.mu.Lock() - if t.state == reachable || t.state == draining { - if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) - return - } - select { - case <-t.goAway: - id := t.goAwayID - // t.goAway has been closed (i.e.,multiple GoAways). 
- if id < f.LastStreamID { - t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) - return - } - t.prevGoAwayID = id - t.goAwayID = f.LastStreamID + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). + // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay + // with the ID of the last stream the server will process. + // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we + // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server + // was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { t.mu.Unlock() + t.Close() return - default: - t.setGoAwayReason(f) } - t.goAwayID = f.LastStreamID + default: + t.setGoAwayReason(f) close(t.goAway) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + close(stream.goAway) + } } + t.prevGoAwayID = id + active := len(t.activeStreams) t.mu.Unlock() + if active == 0 { + t.Close() + } } // setGoAwayReason sets the value of t.goAwayReason based @@ -960,20 +1021,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !ok { return } + s.mu.Lock() s.bytesReceived = true + s.mu.Unlock() var state decodeState - for _, hf := range frame.Fields { - if err := state.processHeaderField(hf); err != nil { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return + if err := state.decodeResponseHeader(frame); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. + return } endStream := frame.StreamEnded() @@ -985,13 +1046,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader) + t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), } - t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer) + t.statsHandler.HandleRPC(s.ctx, inTrailer) } } }() @@ -1039,22 +1100,22 @@ func handleMalformedHTTP2(s *Stream, err error) { // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { // Check the validity of server preface. 
- frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() if err != nil { - t.notifyError(err) + t.Close() return } atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.notifyError(err) + t.Close() return } t.handleSettings(sf) // loop to keep reading incoming messages on this transport. for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.CompareAndSwapUint32(&t.activity, 0, 1) if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1066,12 +1127,12 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) } continue } else { // Transport error. - t.notifyError(err) + t.Close() return } } @@ -1091,7 +1152,7 @@ func (t *http2Client) reader() { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } @@ -1115,7 +1176,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) { t.mu.Lock() for _, stream := range t.activeStreams { // Adjust the sending quota for each stream. - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) } t.streamSendQuota = s.Val t.mu.Unlock() @@ -1123,49 +1184,78 @@ func (t *http2Client) applySettings(ss []http2.Setting) { } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - // If the server needs to be to intimated about stream closing, - // then we need to make sure the RST_STREAM frame is written to - // the wire before the headers of the next stream waiting on - // streamQuota. We ensure this by adding to the streamsQuota pool - // only after having acquired the writableChan to send RST_STREAM. - t.streamsQuota.add(1) - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. 
+func (t *http2Client) itemHandler(i item) error { + var err error + switch i := i.(type) { + case *dataFrame: + err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) + if err == nil { + i.f() + } + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + endHeaders := false + first := true + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err } - case <-t.shutdownChan: - return } + case *windowUpdate: + err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + err = t.framer.fr.WriteSettingsAck() + } else { + err = t.framer.fr.WriteSettings(i.ss...) + } + case *resetStream: + // If the server needs to be to intimated about stream closing, + // then we need to make sure the RST_STREAM frame is written to + // the wire before the headers of the next stream waiting on + // streamQuota. We ensure this by adding to the streamsQuota pool + // only after having acquired the writableChan to send RST_STREAM. + err = t.framer.fr.WriteRSTStream(i.streamID, i.code) + t.streamsQuota.add(1) + case *flushIO: + err = t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) + } + err = t.framer.fr.WritePing(i.ack, i.data) + default: + errorf("transport: http2Client.controller got unexpected item type %v\n", i) } + return err } // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. @@ -1189,7 +1279,7 @@ func (t *http2Client) keepalive() { case <-t.awakenKeepalive: // If the control gets here a ping has been sent // need to reset the timer with keepalive.Timeout. - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } else { @@ -1208,13 +1298,13 @@ func (t *http2Client) keepalive() { } t.Close() return - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } return } - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } @@ -1224,25 +1314,9 @@ func (t *http2Client) keepalive() { } func (t *http2Client) Error() <-chan struct{} { - return t.errorChan + return t.ctx.Done() } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 14cd19c64c6e..00df8eed0fdb 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. 
* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ @@ -36,6 +21,7 @@ package transport import ( "bytes" "errors" + "fmt" "io" "math" "math/rand" @@ -51,7 +37,6 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -67,35 +52,25 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context + cancel context.CancelFunc conn net.Conn remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. 
maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer + controlBuf *controlBuffer fc *inFlow // sendQuotaPool provides flow control to outbound message. sendQuotaPool *quotaPool - - stats stats.Handler - + stats stats.Handler // Flag to keep track of reading activity on transport. // 1 is true and 0 is false. activity uint32 // Accessed atomically. @@ -111,15 +86,25 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + + mu sync.Mutex // guard the following - mu sync.Mutex // guard the following + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} state transportState activeStreams map[uint32]*Stream // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 // idle is the time instant when the connection went idle. - // This is either the begining of the connection or when the number of + // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time @@ -128,32 +113,51 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - framer := newFramer(conn) + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) // Send initial settings as connection preface to client. - var settings []http2.Setting + var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. 
maxStreams := config.MaxStreams if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{ + isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, Val: maxStreams, }) } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{ + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) + Val: uint32(iwz)}) } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, connectionErrorf(true, err, "transport: %v", err) + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) } } kp := config.KeepaliveParams @@ -179,29 +183,36 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err kep.MinTime = defaultKeepalivePolicyMinTime } var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ - ctx: context.Background(), - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - stats: config.StatsHandler, - kp: kp, - idle: time.Now(), - kep: kep, + ctx: ctx, + cancel: cancel, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ @@ -211,37 +222,68 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - go t.controller() + t.framer.writer.Flush() + + // Check the validity of client preface. 
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() go t.keepalive() - t.writableChan <- 0 return t, nil } // operateHeader takes action on the decoded headers. func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { - buf := newRecvBuffer() - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: &inFlow{limit: initialWindowSize}, - } + streamID := frame.Header().StreamID var state decodeState for _, hf := range frame.Fields { if err := state.processHeaderField(hf); err != nil { if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) } return } } + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - s.recvCompress = state.encoding if state.timeoutSet { s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) } else { @@ -263,13 +305,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) } - s.recvCompress = state.encoding - s.method = state.method if t.inTapHandle != nil { var err error info := &tap.Info{ @@ -277,7 +318,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { - // TODO: Log the real error. + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) return } @@ -289,24 +330,25 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) return } - if s.id%2 != 1 || s.id <= t.maxStreamID { + if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. 
- grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) return true } - t.maxStreamID = s.id + t.maxStreamID = streamID s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s + s.localSendQuota = newQuotaPool(defaultLocalSendQuota) + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { @@ -320,6 +362,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.stats.HandleRPC(s.ctx, inHeader) } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } handle(s) return } @@ -328,40 +379,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - // Check the validity of client preface. - preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - atomic.StoreUint32(&t.activity, 1) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - for { - frame, err := t.framer.readFrame() + frame, err := t.framer.fr.ReadFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -378,7 +397,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. t.Close() return } - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } @@ -401,7 +420,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } @@ -421,6 +440,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { return s, true } +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. 
+func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. @@ -430,42 +466,78 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) { if s.state == streamDone { return } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } t.controlBuf.put(&windowUpdate{s.id, w}) } } +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) + +} + func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length - if err := t.fc.onData(uint32(size)); err != nil { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could be an empty frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + errorf("transport: http2Server %v", err) + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if size > 0 { - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := t.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } return } if err := s.fc.onData(uint32(size)); err != nil { @@ -517,17 +589,38 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { ss = append(ss, s) return nil }) - // The settings will be applied once the ack is sent. 
t.controlBuf.put(&settings{ack: true, ss: ss}) } +func (t *http2Server) applySettings(ss []http2.Setting) { + for _, s := range ss { + if s.ID == http2.SettingInitialWindowSize { + t.mu.Lock() + for _, stream := range t.activeStreams { + stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + + } +} + const ( maxPingStrikes = 2 defaultPingTimeout = 2 * time.Hour ) func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { // Do nothing. + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } return } pingAck := &ping{ack: true} @@ -550,7 +643,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should - // have come after atleast defaultPingTimeout. + // have come after at least defaultPingTimeout. if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } @@ -563,7 +656,8 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings")}) + errorf("transport: Got to too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } @@ -579,47 +673,16 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - defer func() { - if err == nil { - // Reset ping strikes when seding headers since that might cause the - // peer to send ping. - atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - } - return nil -} - // WriteHeader sends the header metedata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + s.mu.Lock() if s.headerOk || s.state == streamDone { s.mu.Unlock() @@ -635,14 +698,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } md = s.header s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. 
+ headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, vv := range md { if isReservedHeader(k) { @@ -650,20 +712,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) if t.stats != nil { outHeader := &stats.OutHeader{ - WireLength: bufLen, + //WireLength: // TODO(mmukhi): Revisit this later, if needed. } t.stats.HandleRPC(s.Context(), outHeader) } - t.writableChan <- 0 return nil } @@ -672,6 +734,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var headersSent, hasHeader bool s.mu.Lock() if s.state == streamDone { @@ -691,20 +759,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headersSent = true } - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() + // TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !headersSent { - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) } - t.hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(st.Code())), - }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) @@ -713,7 +776,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { panic(err) } - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. 
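// A minimal, self-contained sketch of the pattern the hunks above and below move to:
// callers no longer encode and write HTTP/2 frames themselves while holding the
// writableChan token; they collect hpack header fields into a job and enqueue it,
// and a single writer goroutine drains the queue and performs all frame writes.
// The headerJob type and channel wiring below are illustrative stand-ins only and
// are not grpc-go's own headerFrame/controlBuf definitions.
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerJob is a simplified stand-in for a queued header-frame write request.
type headerJob struct {
	streamID  uint32
	fields    []hpack.HeaderField
	endStream bool
}

func main() {
	jobs := make(chan headerJob, 1)
	done := make(chan struct{})

	// Single writer goroutine: the only place where frames would be serialized.
	go func() {
		defer close(done)
		for job := range jobs {
			for _, f := range job.fields {
				fmt.Printf("stream %d: %s = %s (endStream=%v)\n",
					job.streamID, f.Name, f.Value, job.endStream)
			}
		}
	}()

	// A caller builds the complete field list up front, then hands it off.
	fields := []hpack.HeaderField{
		{Name: ":status", Value: "200"},
		{Name: "content-type", Value: "application/grpc"},
	}
	jobs <- headerJob{streamID: 1, fields: fields, endStream: false}
	close(jobs)
	<-done
}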
@@ -723,29 +786,32 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { continue } for _, v := range vv { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := t.hBuf.Len() - if err := t.writeHeaders(s, t.hBuf, true); err != nil { - t.Close() - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + }) if t.stats != nil { - outTrailer := &stats.OutTrailer{ - WireLength: bufLen, - } - t.stats.HandleRPC(s.Context(), outTrailer) + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } t.closeStream(s) - t.writableChan <- 0 return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { - // TODO(zhaoq): Support multi-writers for a single stream. +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + var writeHeaderFrame bool s.mu.Lock() if s.state == streamDone { @@ -759,107 +825,81 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { if writeHeaderFrame { t.WriteHeader(s, nil) } - defer func() { - if err == nil { + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for _, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok we make this negative. // Reset ping strikes when sending data since this might cause // the peer to send ping. atomic.StoreUint32(&t.resetPingStrikes, 1) - } - }() - r := bytes.NewBuffer(data) - for { - if r.Len() == 0 { - return nil - } - size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p := r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. 
- s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the - // transport. - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok { - // Return the connection quota back. - t.sendQuotaPool.add(ps) - } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { + s.localSendQuota.add(ps) + }}) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(ps) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) + failure := func() { + s.sendQuotaPool.lockedAdd(sq) } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return connectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota)) + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) } - t.streamSendQuota = s.Val } - } + return nil } // keepalive running in a separate goroutine does the following: // 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. // 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. // 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-resposive connection +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} @@ -868,7 +908,7 @@ func (t *http2Server) keepalive() { maxAge := time.NewTimer(t.kp.MaxConnectionAge) keepalive := time.NewTimer(t.kp.Time) // NOTE: All exit paths of this function should reset their - // respecitve timers. A failure to do so will cause the + // respective timers. A failure to do so will cause the // following clean-up to deadlock and eventually leak. defer func() { if !maxIdle.Stop() { @@ -892,23 +932,18 @@ func (t *http2Server) keepalive() { continue } val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. 
- t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) // Reseting the timer so that the clean-up doesn't deadlock. maxIdle.Reset(infinity) return } - t.mu.Unlock() maxIdle.Reset(val) case <-maxAge.C: - t.mu.Lock() - t.state = draining - t.mu.Unlock() - t.Drain() + t.drain(http2.ErrCodeNo, []byte{}) maxAge.Reset(t.kp.MaxConnectionAgeGrace) select { case <-maxAge.C: @@ -916,7 +951,7 @@ func (t *http2Server) keepalive() { t.Close() // Reseting the timer so that the clean-up doesn't deadlock. maxAge.Reset(infinity) - case <-t.shutdownChan: + case <-t.ctx.Done(): } return case <-keepalive.C: @@ -934,69 +969,137 @@ func (t *http2Server) keepalive() { pingSent = true t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } } -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. +func (t *http2Server) itemHandler(i item) error { + switch i := i.(type) { + case *dataFrame: + if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil { + return err + } + i.f() + return nil + case *headerFrame: + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + first := true + endHeaders := false + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + var err error + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + atomic.StoreUint32(&t.resetPingStrikes, 1) + return nil + case *windowUpdate: + return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) + case *settings: + if i.ack { + t.applySettings(i.ss) + return t.framer.fr.WriteSettingsAck() + } + return t.framer.fr.WriteSettings(i.ss...) + case *resetStream: + return t.framer.fr.WriteRSTStream(i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. + return fmt.Errorf("transport: Connection closing") + } + sid := t.maxStreamID + if !i.headsUp { + // Stop accepting more streams now. + t.state = draining + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil { + return err + } + if i.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return fmt.Errorf("transport: Connection closing") + } + return nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. 
+ // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. - return - } - sid := t.maxStreamID - t.state = draining - t.mu.Unlock() - t.framer.writeGoAway(true, sid, i.code, i.debugData) - if i.code == http2.ErrCodeEnhanceYourCalm { - t.Close() - } - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): return } - case <-t.shutdownChan: - return + t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) + }() + return nil + case *flushIO: + return t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) } + return t.framer.fr.WritePing(i.ack, i.data) + default: + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { +func (t *http2Server) Close() error { t.mu.Lock() if t.state == closing { t.mu.Unlock() @@ -1006,8 +1109,8 @@ func (t *http2Server) Close() (err error) { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() + t.cancel() + err := t.conn.Close() // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1016,7 +1119,7 @@ func (t *http2Server) Close() (err error) { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return + return err } // closeStream clears the footprint of a stream when the stream is not needed @@ -1036,11 +1139,6 @@ func (t *http2Server) closeStream(s *Stream) { // called to interrupt the potential blocking on other goroutines. 
s.cancel() s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } if s.state == streamDone { s.mu.Unlock() return @@ -1054,7 +1152,17 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{code: http2.ErrCodeNo}) + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) } var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 795d5d18a4f8..39f878cfd5bf 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* */ @@ -40,9 +25,9 @@ import ( "fmt" "io" "net" + "net/http" "strconv" "strings" - "sync/atomic" "time" "github.com/golang/protobuf/proto" @@ -50,7 +35,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -60,7 +44,8 @@ const ( // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. - http2IOBufSize = 32 * 1024 + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 ) var ( @@ -88,6 +73,24 @@ var ( codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } ) // Records the states during HPACK decoding. Must be reset once the @@ -100,14 +103,17 @@ type decodeState struct { statusGen *status.Status // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not // intended for direct access outside of parsing. - rawStatusCode int32 + rawStatusCode *int rawStatusMsg string + httpStatus *int // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. - mdata map[string][]string + mdata map[string][]string + statsTags []byte + statsTrace []byte } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -159,7 +165,7 @@ func validContentType(t string) bool { func (d *decodeState) status() *status.Status { if d.statusGen == nil { // No status-details were provided; generate status using code/msg. - d.statusGen = status.New(codes.Code(d.rawStatusCode), d.rawStatusMsg) + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) } return d.statusGen } @@ -193,6 +199,51 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. 
+ // So that, if the stream has ended this Unknown status + // will be propogated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propogated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + func (d *decodeState) processHeaderField(f hpack.HeaderField) error { switch f.Name { case "content-type": @@ -206,7 +257,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { if err != nil { return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) } - d.rawStatusCode = int32(code) + d.rawStatusCode = &code case "grpc-message": d.rawStatusMsg = decodeGrpcMessage(f.Value) case "grpc-status-details-bin": @@ -227,18 +278,36 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { } case ":path": d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) default: - if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return nil - } - d.mdata[f.Name] = append(d.mdata[f.Name], v) + if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + break } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, string(v)) } return nil } @@ -406,10 +475,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn) *framer { +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), } f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -418,132 +487,3 @@ func newFramer(conn net.Conn) *framer { f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} - -func (f *framer) errorDetail() error { - return f.fr.ErrorDetail() -} diff --git a/vendor/google.golang.org/grpc/transport/http_util_test.go b/vendor/google.golang.org/grpc/transport/http_util_test.go index b3698018bdab..4ebb239050ec 100644 --- a/vendor/google.golang.org/grpc/transport/http_util_test.go +++ b/vendor/google.golang.org/grpc/transport/http_util_test.go @@ -1,33 +1,18 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 000000000000..ac8e358c5c8c --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) 
+ } +} diff --git a/vendor/google.golang.org/grpc/transport/testdata/ca.pem b/vendor/google.golang.org/grpc/transport/testdata/ca.pem deleted file mode 100644 index 6c8511a73c68..000000000000 --- a/vendor/google.golang.org/grpc/transport/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/transport/testdata/server1.key b/vendor/google.golang.org/grpc/transport/testdata/server1.key deleted file mode 100644 index 143a5b87658d..000000000000 --- a/vendor/google.golang.org/grpc/transport/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/transport/testdata/server1.pem b/vendor/google.golang.org/grpc/transport/testdata/server1.pem deleted file mode 100644 index f3d43fcc5bea..000000000000 --- a/vendor/google.golang.org/grpc/transport/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw -CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 
-hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 88c2c9872129..bde8fa5c3a23 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -1,48 +1,32 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. + * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ -/* -Package transport defines and implements message oriented communication channel -to complete various transactions (e.g., an RPC). -*/ +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). package transport // import "google.golang.org/grpc/transport" import ( - "bytes" + stdctx "context" "fmt" "io" "net" "sync" + "time" "golang.org/x/net/context" "golang.org/x/net/http2" @@ -65,57 +49,56 @@ type recvMsg struct { err error } -func (*recvMsg) item() {} - -// All items in an out of a recvBuffer should be the same type. -type item interface { - item() -} - -// recvBuffer is an unbounded channel of item. +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. 
+// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { - c chan item + c chan recvMsg mu sync.Mutex - backlog []item + backlog []recvMsg } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ - c: make(chan item, 1), + c: make(chan recvMsg, 1), } return b } -func (b *recvBuffer) put(r item) { +func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) + b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} b.backlog = b.backlog[1:] default: } } + b.mu.Unlock() } -// get returns the channel that receives an item in the buffer. +// get returns the channel that receives a recvMsg in the buffer. // -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *recvBuffer) get() <-chan item { +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { return b.c } @@ -125,7 +108,7 @@ type recvBufferReader struct { ctx context.Context goAway chan struct{} recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. + last []byte // Stores the remaining data in the previous calls. err error } @@ -136,25 +119,85 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } - defer func() { r.err = err }() - if r.last != nil && r.last.Len() > 0 { + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { // Read remaining data left in last call. - return r.last.Read(p) + copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil } select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) case <-r.goAway: return 0, ErrStreamDrain - case i := <-r.recv.get(): + case m := <-r.recv.get(): r.recv.load() - m := i.(*recvMsg) if m.err != nil { return 0, m.err } - r.last = bytes.NewReader(m.data) - return r.last.Read(p) + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil + } +} + +// All items in an out of a controlBuffer should be the same type. +type item interface { + item() +} + +// controlBuffer is an unbounded channel of item. +type controlBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newControlBuffer() *controlBuffer { + b := &controlBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *controlBuffer) put(r item) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *controlBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } } + b.mu.Unlock() +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *controlBuffer) get() <-chan item { + return b.c } type streamState uint8 @@ -171,11 +214,6 @@ type Stream struct { id uint32 // nil for client side Stream. 
st ServerTransport - // clientStatsCtx keeps the user context for stats handling. - // It's only valid on client side. Server side stats context is same as s.ctx. - // All client side stats collection should use the clientStatsCtx (instead of the stream context) - // so that all the generated stats for a particular RPC can be associated in the processing phase. - clientStatsCtx context.Context // ctx is the associated context of the stream. ctx context.Context // cancel is always nil for client side Stream. @@ -189,16 +227,20 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - dec io.Reader + trReader io.Reader fc *inFlow recvQuota uint32 + + // TODO: Remote this unused variable. // The accumulated inbound quota pending for window update. updateQuota uint32 - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) - sendQuotaPool *quotaPool + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if need be. + requestRead func(int) + + sendQuotaPool *quotaPool + localSendQuota *quotaPool // Close headerChan to indicate the end of reception of header metadata. headerChan chan struct{} // header caches the received header metadata. @@ -251,16 +293,24 @@ func (s *Stream) GoAway() <-chan struct{} { // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is cancelled/expired. +// header metadata or iii) the stream is canceled/expired. func (s *Stream) Header() (metadata.MD, error) { + var err error select { case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + err = ContextErr(s.ctx.Err()) case <-s.goAway: - return nil, ErrStreamDrain + err = ErrStreamDrain + case <-s.headerChan: + return s.header.Copy(), nil + } + // Even if the stream is closed, header is returned if available. + select { case <-s.headerChan: return s.header.Copy(), nil + default: } + return nil, err } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -268,8 +318,9 @@ func (s *Stream) Header() (metadata.MD, error) { // side only. func (s *Stream) Trailer() metadata.MD { s.mu.RLock() - defer s.mu.RUnlock() - return s.trailer.Copy() + c := s.trailer.Copy() + s.mu.RUnlock() + return c } // ServerTransport returns the underlying ServerTransport for the stream. @@ -297,14 +348,16 @@ func (s *Stream) Status() *status.Status { // Server side only. func (s *Stream) SetHeader(md metadata.MD) error { s.mu.Lock() - defer s.mu.Unlock() if s.headerOk || s.state == streamDone { + s.mu.Unlock() return ErrIllegalHeaderWrite } if md.Len() == 0 { + s.mu.Unlock() return nil } s.header = metadata.Join(s.header, md) + s.mu.Unlock() return nil } @@ -315,25 +368,44 @@ func (s *Stream) SetTrailer(md metadata.MD) error { return nil } s.mu.Lock() - defer s.mu.Unlock() s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() return nil } func (s *Stream) write(m recvMsg) { - s.buf.put(&m) + s.buf.put(m) } -// Read reads all the data available for this Stream from the transport and +// Read reads all p bytes from the wire for this stream. 
+func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) if err != nil { + t.er = err return } - s.windowHandler(n) + t.windowHandler(n) return } @@ -348,15 +420,17 @@ func (s *Stream) finish(st *status.Status) { // BytesSent indicates whether any bytes have been sent on this stream. func (s *Stream) BytesSent() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesSent + bs := s.bytesSent + s.mu.Unlock() + return bs } // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesReceived + br := s.bytesReceived + s.mu.Unlock() + return br } // GoString is implemented by Stream so context.String() won't @@ -385,19 +459,22 @@ type transportState int const ( reachable transportState = iota - unreachable closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler - KeepaliveParams keepalive.ServerParameters - KeepalivePolicy keepalive.EnforcementPolicy + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -425,6 +502,14 @@ type ConnectOptions struct { KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int } // TargetInfo contains the information of the target such as network address and metadata. @@ -435,8 +520,8 @@ type TargetInfo struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) } // Options provides additional hints and information for message @@ -448,7 +533,7 @@ type Options struct { // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. + // transport implementation may ignore the hint. Delay bool } @@ -468,10 +553,15 @@ type CallHdr struct { // outbound message. SendCompress string + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision + // only a hint. + // If it's true, the transport may modify the flush decision // for performance purposes. + // If it's false, new stream will never be flushed. Flush bool } @@ -489,7 +579,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -507,7 +597,7 @@ type ClientTransport interface { // once the transport is initiated. Error() <-chan struct{} - // GoAway returns a channel that is closed when ClientTranspor + // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} @@ -531,7 +621,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -613,45 +703,33 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return streamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) -} - -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise -// it return the StreamError for ctx.Err. -// If it receives from goAway, it returns 0, ErrStreamDrain. -// If it receives from closing, it returns 0, ErrConnClosing. -// If it receives from proceed, it returns the received integer, nil. 
-func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { +// wait blocks until it can receive from one of the provided contexts or channels +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) case <-done: - // User cancellation has precedence. - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - default: - } return 0, io.EOF case <-goAway: return 0, ErrStreamDrain - case <-closing: + case <-tctx.Done(): return 0, ErrConnClosing case i := <-proceed: return i, nil } } +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, stdctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, stdctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -661,6 +739,39 @@ const ( // NoReason is the default value when GoAway frame is received. NoReason GoAwayReason = 1 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was recieved and that the debug data said "too_many_pings". + // was received and that the debug data said "too_many_pings". TooManyPings GoAwayReason = 2 ) + +// loopyWriter is run in a separate go routine. It is the single code path that will +// write data on wire. +func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + } + hasData: + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + default: + if err := handler(&flushIO{}); err != nil { + return + } + break hasData + } + } + } +} diff --git a/vendor/google.golang.org/grpc/transport/transport_test.go b/vendor/google.golang.org/grpc/transport/transport_test.go index 4e986e56a210..443a39134623 100644 --- a/vendor/google.golang.org/grpc/transport/transport_test.go +++ b/vendor/google.golang.org/grpc/transport/transport_test.go @@ -1,44 +1,33 @@ /* * - * Copyright 2014, Google Inc. - * All rights reserved. + * Copyright 2014 gRPC authors. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
+ * http://www.apache.org/licenses/LICENSE-2.0 * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * */ package transport import ( + "bufio" "bytes" + "encoding/binary" + "errors" "fmt" "io" "math" "net" + "net/http" "reflect" "strconv" "strings" @@ -60,6 +49,7 @@ type server struct { startedErr chan error // error (or nil) with server start value mu sync.Mutex conns map[ServerTransport]bool + h *testStreamHandler } var ( @@ -71,7 +61,8 @@ var ( ) type testStreamHandler struct { - t *http2Server + t *http2Server + notify chan struct{} } type hType int @@ -79,11 +70,28 @@ type hType int const ( normal hType = iota suspended + notifyCall misbehaved encodingRequiredStatus invalidHeaderField + delayRead + delayWrite + pingpong ) +func (h *testStreamHandler) handleStreamAndNotify(s *Stream) { + if h.notify == nil { + return + } + go func() { + select { + case <-h.notify: + default: + close(h.notify) + } + }() +} + func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { req := expectedRequest resp := expectedResponse @@ -92,7 +100,7 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { resp = expectedResponseLarge } p := make([]byte, len(req)) - _, err := io.ReadFull(s, p) + _, err := s.Read(p) if err != nil { return } @@ -100,16 +108,28 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { t.Fatalf("handleStream got %v, want %v", p, req) } // send a response back to the client. - h.t.Write(s, resp, &Options{}) + h.t.Write(s, nil, resp, &Options{}) // send the trailer to end the stream. h.t.WriteStatus(s, status.New(codes.OK, "")) } -// handleStreamSuspension blocks until s.ctx is canceled. 
-func (h *testStreamHandler) handleStreamSuspension(s *Stream) { - go func() { - <-s.ctx.Done() - }() +func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) { + header := make([]byte, 5) + for i := 0; i < 10; i++ { + if _, err := s.Read(header); err != nil { + t.Fatalf("Error on server while reading data header: %v", err) + } + sz := binary.BigEndian.Uint32(header[1:]) + msg := make([]byte, int(sz)) + if _, err := s.Read(msg); err != nil { + t.Fatalf("Error on server while reading message: %v", err) + } + buf := make([]byte, sz+5) + buf[0] = byte(0) + binary.BigEndian.PutUint32(buf[1:], uint32(sz)) + copy(buf[5:], msg) + h.t.Write(s, nil, buf, &Options{}) + } } func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { @@ -120,7 +140,6 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { var sent int p := make([]byte, http2MaxFrameLen) for sent < initialWindowSize { - <-conn.writableChan n := initialWindowSize - sent // The last message may be smaller than http2MaxFrameLen if n <= http2MaxFrameLen { @@ -133,11 +152,7 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { p = make([]byte, n+1) } } - if err := conn.framer.writeData(true, s.id, false, p); err != nil { - conn.writableChan <- 0 - break - } - conn.writableChan <- 0 + conn.controlBuf.put(&dataFrame{s.id, false, p, func() {}}) sent += len(p) } } @@ -148,13 +163,65 @@ func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s * } func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) { - <-h.t.writableChan - h.t.hBuf.Reset() - h.t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) - if err := h.t.writeHeaders(s, h.t.hBuf, false); err != nil { - t.Fatalf("Failed to write headers: %v", err) + headerFields := []hpack.HeaderField{} + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) + h.t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) +} + +func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + + // Wait before reading. Give time to client to start sending + // before server starts reading. + time.Sleep(2 * time.Second) + _, err := s.Read(p) + if err != nil { + t.Fatalf("s.Read(_) = _, %v, want _, ", err) + return + } + + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + // send a response back to the client. + h.t.Write(s, nil, resp, &Options{}) + // send the trailer to end the stream. + h.t.WriteStatus(s, status.New(codes.OK, "")) +} + +func (h *testStreamHandler) handleStreamDelayWrite(t *testing.T, s *Stream) { + req := expectedRequest + resp := expectedResponse + if s.Method() == "foo.Large" { + req = expectedRequestLarge + resp = expectedResponseLarge + } + p := make([]byte, len(req)) + _, err := s.Read(p) + if err != nil { + t.Fatalf("s.Read(_) = _, %v, want _, ", err) + return } - h.t.writableChan <- 0 + if !bytes.Equal(p, req) { + t.Fatalf("handleStream got %v, want %v", p, req) + } + + // Wait before sending. Give time to client to start reading + // before server starts sending. + time.Sleep(2 * time.Second) + h.t.Write(s, nil, resp, &Options{}) + // send the trailer to end the stream. 
+ h.t.WriteStatus(s, status.New(codes.OK, "")) } // start starts server. Other goroutines should block on s.readyChan for further operations. @@ -193,11 +260,17 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT return } s.conns[transport] = true + h := &testStreamHandler{t: transport.(*http2Server)} + s.h = h s.mu.Unlock() - h := &testStreamHandler{transport.(*http2Server)} switch ht { + case notifyCall: + go transport.HandleStreams(h.handleStreamAndNotify, + func(ctx context.Context, _ string) context.Context { + return ctx + }) case suspended: - go transport.HandleStreams(h.handleStreamSuspension, + go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream. func(ctx context.Context, method string) context.Context { return ctx }) @@ -219,6 +292,24 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT }, func(ctx context.Context, method string) context.Context { return ctx }) + case delayRead: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamDelayRead(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case delayWrite: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamDelayWrite(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) + case pingpong: + go transport.HandleStreams(func(s *Stream) { + go h.handleStreamPingPong(t, s) + }, func(ctx context.Context, method string) context.Context { + return ctx + }) default: go transport.HandleStreams(func(s *Stream) { go h.handleStream(t, s) @@ -266,7 +357,7 @@ func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hTy target := TargetInfo{ Addr: addr, } - ct, connErr = NewClientTransport(context.Background(), target, copts) + ct, connErr = NewClientTransport(context.Background(), target, copts, 2*time.Second) if connErr != nil { t.Fatalf("failed to create transport: %v", connErr) } @@ -289,7 +380,7 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, done chan net.Con } done <- conn }() - tr, err := NewClientTransport(context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts) + tr, err := NewClientTransport(context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts, 2*time.Second) if err != nil { // Server clean-up. lis.Close() @@ -301,6 +392,43 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, done chan net.Con return tr } +// TestInflightStreamClosing ensures that closing in-flight stream +// sends StreamError to concurrent stream reader. 
+func TestInflightStreamClosing(t *testing.T) { + serverConfig := &ServerConfig{} + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + + stream, err := client.NewStream(context.Background(), &CallHdr{}) + if err != nil { + t.Fatalf("Client failed to create RPC request: %v", err) + } + + donec := make(chan struct{}) + serr := StreamError{Desc: "client connection is closing"} + go func() { + defer close(donec) + if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr { + t.Errorf("unexpected Stream error %v, expected %v", err, serr) + } + }() + + // should unblock concurrent stream.Read + client.CloseStream(stream, serr) + + // wait for stream.Read error + timeout := time.NewTimer(5 * time.Second) + select { + case <-donec: + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + t.Fatalf("Test timed out, expected a StreamError.") + } +} + // TestMaxConnectionIdle tests that a server will send GoAway to a idle client. // An idle client is one who doesn't make any RPC calls for a duration of // MaxConnectionIdle time. @@ -385,7 +513,7 @@ func TestMaxConnectionAge(t *testing.T) { } } -// TestKeepaliveServer tests that a server closes conneciton with a client that doesn't respond to keepalive pings. +// TestKeepaliveServer tests that a server closes connection with a client that doesn't respond to keepalive pings. func TestKeepaliveServer(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ @@ -401,8 +529,18 @@ func TestKeepaliveServer(t *testing.T) { t.Fatalf("Failed to dial: %v", err) } defer client.Close() + // Set read deadline on client conn so that it doesn't block forever in errorsome cases. - client.SetReadDeadline(time.Now().Add(10 * time.Second)) + client.SetDeadline(time.Now().Add(10 * time.Second)) + + if n, err := client.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("Error writing client preface; n=%v, err=%v", n, err) + } + framer := newFramer(client, defaultWriteBufSize, defaultReadBufSize) + if err := framer.fr.WriteSettings(http2.Setting{}); err != nil { + t.Fatal("Error writing settings frame:", err) + } + framer.writer.Flush() // Wait for keepalive logic to close the connection. time.Sleep(4 * time.Second) b := make([]byte, 24) @@ -690,15 +828,15 @@ func TestClientSendAndReceive(t *testing.T) { Last: true, Delay: false, } - if err := ct.Write(s1, expectedRequest, &opts); err != nil && err != io.EOF { + if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF { t.Fatalf("failed to send data: %v", err) } p := make([]byte, len(expectedResponse)) - _, recvErr := io.ReadFull(s1, p) + _, recvErr := s1.Read(p) if recvErr != nil || !bytes.Equal(p, expectedResponse) { t.Fatalf("Error: %v, want ; Result: %v, want %v", recvErr, p, expectedResponse) } - _, recvErr = io.ReadFull(s1, p) + _, recvErr = s1.Read(p) if recvErr != io.EOF { t.Fatalf("Error: %v; want ", recvErr) } @@ -727,16 +865,16 @@ func performOneRPC(ct ClientTransport) { Last: true, Delay: false, } - if err := ct.Write(s, expectedRequest, &opts); err == nil || err == io.EOF { + if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF { time.Sleep(5 * time.Millisecond) // The following s.Recv()'s could error out because the // underlying transport is gone. 
// // Read response p := make([]byte, len(expectedResponse)) - io.ReadFull(s, p) + s.Read(p) // Read io.EOF - io.ReadFull(s, p) + s.Read(p) } } @@ -771,14 +909,84 @@ func TestLargeMessage(t *testing.T) { if err != nil { t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) } - if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponseLarge)) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("s.Read(%v) = _, %v, want %v, ", err, p, expectedResponse) + } + if _, err = s.Read(p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageWithDelayRead(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, delayRead) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) + } + p := make([]byte, len(expectedResponseLarge)) + + // Give time to server to begin sending before client starts reading. + time.Sleep(2 * time.Second) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("s.Read(_) = _, %v, want _, ", err) + } + if _, err = s.Read(p); err != io.EOF { + t.Errorf("Failed to complete the stream %v; want ", err) + } + }() + } + wg.Wait() + ct.Close() + server.stop() +} + +func TestLargeMessageDelayWrite(t *testing.T) { + server, ct := setUp(t, 0, math.MaxUint32, delayWrite) + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Large", + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := ct.NewStream(context.Background(), callHdr) + if err != nil { + t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) + } + + // Give time to server to start reading before client starts sending. + time.Sleep(2 * time.Second) + if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) } p := make([]byte, len(expectedResponseLarge)) - if _, err := io.ReadFull(s, p); err != nil || !bytes.Equal(p, expectedResponseLarge) { - t.Errorf("io.ReadFull(_, %v) = _, %v, want %v, ", err, p, expectedResponse) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { + t.Errorf("io.ReadFull(%v) = _, %v, want %v, ", err, p, expectedResponse) } - if _, err = io.ReadFull(s, p); err != io.EOF { + if _, err = s.Read(p); err != io.EOF { t.Errorf("Failed to complete the stream %v; want ", err) } }() @@ -817,14 +1025,14 @@ func TestGracefulClose(t *testing.T) { Delay: false, } // The stream which was created before graceful close can still proceed. 
- if err := ct.Write(s, expectedRequest, &opts); err != nil && err != io.EOF { + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { t.Fatalf("%v.Write(_, _, _) = %v, want ", ct, err) } p := make([]byte, len(expectedResponse)) - if _, err := io.ReadFull(s, p); err != nil || !bytes.Equal(p, expectedResponse) { - t.Fatalf("io.ReadFull(_, %v) = _, %v, want %v, ", err, p, expectedResponse) + if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponse) { + t.Fatalf("s.Read(%v) = _, %v, want %v, ", err, p, expectedResponse) } - if _, err = io.ReadFull(s, p); err != io.EOF { + if _, err = s.Read(p); err != io.EOF { t.Fatalf("Failed to complete the stream %v; want ", err) } wg.Wait() @@ -839,13 +1047,15 @@ func TestLargeMessageSuspension(t *testing.T) { Method: "foo.Large", } // Set a long enough timeout for writing a large message out. - ctx, _ := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() s, err := ct.NewStream(ctx, callHdr) if err != nil { t.Fatalf("failed to open stream: %v", err) } // Write should not be done successfully due to flow control. - err = ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}) + msg := make([]byte, initialWindowSize*8) + err = ct.Write(s, nil, msg, &Options{Last: true, Delay: false}) expectedErr := streamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded) if err != expectedErr { t.Fatalf("Write got %v, want %v", err, expectedErr) @@ -967,12 +1177,7 @@ func TestServerContextCanceledOnClosedConnection(t *testing.T) { if err != nil { t.Fatalf("Failed to open stream: %v", err) } - // Make sure the headers frame is flushed out. - <-cc.writableChan - if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { - t.Fatalf("Failed to write data: %v", err) - } - cc.writableChan <- 0 + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}}) // Loop until the server side stream is created. var ss *Stream for { @@ -998,6 +1203,172 @@ func TestServerContextCanceledOnClosedConnection(t *testing.T) { server.stop() } +func TestClientConnDecoupledFromApplicationRead(t *testing.T) { + connectOptions := ConnectOptions{ + InitialWindowSize: defaultWindowSize, + InitialConnWindowSize: defaultWindowSize, + } + server, client := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions) + defer server.stop() + defer client.Close() + + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + notifyChan := make(chan struct{}) + server.h.notify = notifyChan + server.mu.Unlock() + cstream1, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Client failed to create first stream. Err: %v", err) + } + + <-notifyChan + var sstream1 *Stream + // Access stream on the server. + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == cstream1.id { + sstream1 = v + } + } + st.mu.Unlock() + if sstream1 == nil { + t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id) + } + // Exhaust client's connection window. 
+ if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Server failed to write data. Err: %v", err) + } + notifyChan = make(chan struct{}) + server.mu.Lock() + server.h.notify = notifyChan + server.mu.Unlock() + // Create another stream on client. + cstream2, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Client failed to create second stream. Err: %v", err) + } + <-notifyChan + var sstream2 *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == cstream2.id { + sstream2 = v + } + } + st.mu.Unlock() + if sstream2 == nil { + t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id) + } + // Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream. + if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Server failed to write data. Err: %v", err) + } + + // Client should be able to read data on second stream. + if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = _, %v, want _, ", err) + } + + // Client should be able to read data on first stream. + if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = _, %v, want _, ", err) + } +} + +func TestServerConnDecoupledFromApplicationRead(t *testing.T) { + serverConfig := &ServerConfig{ + InitialWindowSize: defaultWindowSize, + InitialConnWindowSize: defaultWindowSize, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) + defer server.stop() + defer client.Close() + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + cstream1, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create 1st stream. Err: %v", err) + } + // Exhaust server's connection window. + if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil { + t.Fatalf("Client failed to write data. Err: %v", err) + } + //Client should be able to create another stream and send data on it. + cstream2, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create 2nd stream. Err: %v", err) + } + if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil { + t.Fatalf("Client failed to write data. Err: %v", err) + } + // Get the streams on server. + waitWhileTrue(t, func() (bool, error) { + st.mu.Lock() + defer st.mu.Unlock() + + if len(st.activeStreams) != 2 { + return true, fmt.Errorf("timed-out while waiting for server to have created the streams") + } + return false, nil + }) + var sstream1 *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + if v.id == 1 { + sstream1 = v + } + } + st.mu.Unlock() + // Trying to write more on a max-ed out stream should result in a RST_STREAM from the server. 
+ ct := client.(*http2Client) + ct.controlBuf.put(&dataFrame{cstream2.id, true, make([]byte, 1), func() {}}) + code := http2ErrConvTab[http2.ErrCodeFlowControl] + waitWhileTrue(t, func() (bool, error) { + cstream2.mu.Lock() + defer cstream2.mu.Unlock() + if cstream2.status.Code() != code { + return true, fmt.Errorf("want code = %v, got %v", code, cstream2.status.Code()) + } + return false, nil + }) + // Reading from the stream on server should succeed. + if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil { + t.Fatalf("_.Read(_) = %v, want ", err) + } + + if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF { + t.Fatalf("_.Read(_) = %v, want io.EOF", err) + } + +} + func TestServerWithMisbehavedClient(t *testing.T) { server, ct := setUp(t, 0, math.MaxUint32, suspended) callHdr := &CallHdr{ @@ -1034,11 +1405,7 @@ func TestServerWithMisbehavedClient(t *testing.T) { } var sent int // Drain the stream flow control window - <-cc.writableChan - if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil { - t.Fatalf("Failed to write data: %v", err) - } - cc.writableChan <- 0 + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}}) sent += http2MaxFrameLen // Wait until the server creates the corresponding stream and receive some data. var ss *Stream @@ -1058,51 +1425,35 @@ func TestServerWithMisbehavedClient(t *testing.T) { } ss.fc.mu.Unlock() } - if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != http2MaxFrameLen || sc.fc.pendingUpdate != 0 { - t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, http2MaxFrameLen, 0) + if ss.fc.pendingData != http2MaxFrameLen || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate != 0 { + t.Fatalf("Server mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, http2MaxFrameLen, 0, 0, 0) } // Keep sending until the server inbound window is drained for that stream. for sent <= initialWindowSize { - <-cc.writableChan - if err = cc.framer.writeData(true, s.id, false, make([]byte, 1)); err != nil { - t.Fatalf("Failed to write data: %v", err) - } - cc.writableChan <- 0 + cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, 1), func() {}}) sent++ } // Server sent a resetStream for s already. code := http2ErrConvTab[http2.ErrCodeFlowControl] - if _, err := io.ReadFull(s, make([]byte, 1)); err != io.EOF { + if _, err := s.Read(make([]byte, 1)); err != io.EOF { t.Fatalf("%v got err %v want ", s, err) } if s.status.Code() != code { t.Fatalf("%v got status %v; want Code=%v", s, s.status, code) } - if ss.fc.pendingData != 0 || ss.fc.pendingUpdate != 0 || sc.fc.pendingData != 0 || sc.fc.pendingUpdate <= initialWindowSize { - t.Fatalf("Server mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, >%d", ss.fc.pendingData, ss.fc.pendingUpdate, sc.fc.pendingData, sc.fc.pendingUpdate, initialWindowSize) - } ct.CloseStream(s, nil) - // Test server behavior for violation of connection flow control window size restriction. - // - // Keep creating new streams until the connection window is drained on the server and - // the server tears down the connection. 
- for { - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - // The server tears down the connection. - break - } - <-cc.writableChan - cc.framer.writeData(true, s.id, true, make([]byte, http2MaxFrameLen)) - cc.writableChan <- 0 - } ct.Close() server.stop() } func TestClientWithMisbehavedServer(t *testing.T) { - server, ct := setUp(t, 0, math.MaxUint32, misbehaved) + // Turn off BDP estimation so that the server can + // violate stream window. + connectOptions := ConnectOptions{ + InitialWindowSize: initialWindowSize, + } + server, ct := setUpWithOptions(t, 0, &ServerConfig{}, misbehaved, connectOptions) callHdr := &CallHdr{ Host: "localhost", Method: "foo.Stream", @@ -1117,18 +1468,18 @@ func TestClientWithMisbehavedServer(t *testing.T) { t.Fatalf("Failed to open stream: %v", err) } d := make([]byte, 1) - if err := ct.Write(s, d, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { + if err := ct.Write(s, nil, d, &Options{Last: true, Delay: false}); err != nil && err != io.EOF { t.Fatalf("Failed to write: %v", err) } // Read without window update. for { p := make([]byte, http2MaxFrameLen) - if _, err = s.dec.Read(p); err != nil { + if _, err = s.trReader.(*transportReader).reader.Read(p); err != nil { break } } - if s.fc.pendingData <= initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData <= initialWindowSize || conn.fc.pendingUpdate != 0 { - t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want >%d, %d, >%d, %d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0) + if s.fc.pendingData <= initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != 0 { + t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want >%d, %d, %d, >%d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, 0, 0) } if err != io.EOF { @@ -1139,25 +1490,6 @@ func TestClientWithMisbehavedServer(t *testing.T) { } conn.CloseStream(s, err) - if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate <= initialWindowSize { - t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, >%d", s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize) - } - // Test the logic for the violation of the connection flow control window size restriction. - // - // Generate enough streams to drain the connection window. Make the server flood the traffic - // to violate flow control window size of the connection. - callHdr.Method = "foo.Connection" - for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ { - s, err := ct.NewStream(context.Background(), callHdr) - if err != nil { - break - } - if err := ct.Write(s, d, &Options{Last: true, Delay: false}); err != nil { - break - } - } - // http2Client.errChan is closed due to connection flow control window size violation. 
- <-conn.Error() ct.Close() server.stop() } @@ -1178,11 +1510,11 @@ func TestEncodingRequiredStatus(t *testing.T) { Last: true, Delay: false, } - if err := ct.Write(s, expectedRequest, &opts); err != nil && err != io.EOF { + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { t.Fatalf("Failed to write the request: %v", err) } p := make([]byte, http2MaxFrameLen) - if _, err := s.dec.Read(p); err != io.EOF { + if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF { t.Fatalf("Read got error %v, want %v", err, io.EOF) } if !reflect.DeepEqual(s.Status(), encodingTestStatus) { @@ -1206,11 +1538,11 @@ func TestInvalidHeaderField(t *testing.T) { Last: true, Delay: false, } - if err := ct.Write(s, expectedRequest, &opts); err != nil && err != io.EOF { + if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF { t.Fatalf("Failed to write the request: %v", err) } p := make([]byte, http2MaxFrameLen) - _, err = s.dec.Read(p) + _, err = s.trReader.(*transportReader).Read(p) if se, ok := err.(StreamError); !ok || se.Code != codes.FailedPrecondition || !strings.Contains(err.Error(), expectedInvalidHeaderField) { t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.FailedPrecondition, expectedInvalidHeaderField) } @@ -1266,3 +1598,571 @@ func TestContextErr(t *testing.T) { } } } + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} + +type windowSizeConfig struct { + serverStream int32 + serverConn int32 + clientStream int32 + clientConn int32 +} + +func TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) { + wc := windowSizeConfig{ + serverStream: 10 * 1024 * 1024, + serverConn: 12 * 1024 * 1024, + clientStream: 6 * 1024 * 1024, + clientConn: 8 * 1024 * 1024, + } + testAccountCheckWindowSize(t, wc) +} + +func TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) { + wc := windowSizeConfig{ + serverStream: defaultWindowSize, + // Note this is smaller than initialConnWindowSize which is the current default. + serverConn: defaultWindowSize, + clientStream: defaultWindowSize, + clientConn: defaultWindowSize, + } + testAccountCheckWindowSize(t, wc) +} + +func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) { + serverConfig := &ServerConfig{ + InitialWindowSize: wc.serverStream, + InitialConnWindowSize: wc.serverConn, + } + connectOptions := ConnectOptions{ + InitialWindowSize: wc.clientStream, + InitialConnWindowSize: wc.clientConn, + } + server, client := setUpWithOptions(t, 0, serverConfig, suspended, connectOptions) + defer server.stop() + defer client.Close() + + // Wait for server conns to be populated with new server transport. + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + if len(server.conns) == 0 { + return true, fmt.Errorf("timed out waiting for server transport to be created") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + ct := client.(*http2Client) + cstream, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create stream. Err: %v", err) + } + // Wait for server to receive headers. 
+ waitWhileTrue(t, func() (bool, error) { + st.mu.Lock() + defer st.mu.Unlock() + if len(st.activeStreams) == 0 { + return true, fmt.Errorf("timed out waiting for server to receive headers") + } + return false, nil + }) + // Sleeping to make sure the settings are applied in case of negative test. + time.Sleep(time.Second) + + waitWhileTrue(t, func() (bool, error) { + st.fc.mu.Lock() + lim := st.fc.limit + st.fc.mu.Unlock() + if lim != uint32(serverConfig.InitialConnWindowSize) { + return true, fmt.Errorf("Server transport flow control window size: got %v, want %v", lim, serverConfig.InitialConnWindowSize) + } + return false, nil + }) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + serverSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire()) + if err != nil { + t.Fatalf("Error while acquiring sendQuota on server. Err: %v", err) + } + cancel() + st.sendQuotaPool.add(serverSendQuota) + if serverSendQuota != int(connectOptions.InitialConnWindowSize) { + t.Fatalf("Server send quota(%v) not equal to client's window size(%v) on conn.", serverSendQuota, connectOptions.InitialConnWindowSize) + } + st.mu.Lock() + ssq := st.streamSendQuota + st.mu.Unlock() + if ssq != uint32(connectOptions.InitialWindowSize) { + t.Fatalf("Server stream send quota(%v) not equal to client's window size(%v) on stream.", ssq, connectOptions.InitialWindowSize) + } + ct.fc.mu.Lock() + limit := ct.fc.limit + ct.fc.mu.Unlock() + if limit != uint32(connectOptions.InitialConnWindowSize) { + t.Fatalf("Client transport flow control window size is %v, want %v", limit, connectOptions.InitialConnWindowSize) + } + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + clientSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire()) + if err != nil { + t.Fatalf("Error while acquiring sendQuota on client. Err: %v", err) + } + cancel() + ct.sendQuotaPool.add(clientSendQuota) + if clientSendQuota != int(serverConfig.InitialConnWindowSize) { + t.Fatalf("Client send quota(%v) not equal to server's window size(%v) on conn.", clientSendQuota, serverConfig.InitialConnWindowSize) + } + ct.mu.Lock() + ssq = ct.streamSendQuota + ct.mu.Unlock() + if ssq != uint32(serverConfig.InitialWindowSize) { + t.Fatalf("Client stream send quota(%v) not equal to server's window size(%v) on stream.", ssq, serverConfig.InitialWindowSize) + } + cstream.fc.mu.Lock() + limit = cstream.fc.limit + cstream.fc.mu.Unlock() + if limit != uint32(connectOptions.InitialWindowSize) { + t.Fatalf("Client stream flow control window size is %v, want %v", limit, connectOptions.InitialWindowSize) + } + var sstream *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + sstream = v + } + st.mu.Unlock() + sstream.fc.mu.Lock() + limit = sstream.fc.limit + sstream.fc.mu.Unlock() + if limit != uint32(serverConfig.InitialWindowSize) { + t.Fatalf("Server stream flow control window size is %v, want %v", limit, serverConfig.InitialWindowSize) + } +} + +// Check accounting on both sides after sending and receiving large messages. 
+func TestAccountCheckExpandingWindow(t *testing.T) { + server, client := setUp(t, 0, 0, pingpong) + defer server.stop() + defer client.Close() + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + if len(server.conns) == 0 { + return true, fmt.Errorf("timed out while waiting for server transport to be created") + } + return false, nil + }) + var st *http2Server + server.mu.Lock() + for k := range server.conns { + st = k.(*http2Server) + } + server.mu.Unlock() + ct := client.(*http2Client) + cstream, err := client.NewStream(context.Background(), &CallHdr{Flush: true}) + if err != nil { + t.Fatalf("Failed to create stream. Err: %v", err) + } + + msgSize := 65535 * 16 * 2 + msg := make([]byte, msgSize) + buf := make([]byte, msgSize+5) + buf[0] = byte(0) + binary.BigEndian.PutUint32(buf[1:], uint32(msgSize)) + copy(buf[5:], msg) + opts := Options{} + header := make([]byte, 5) + for i := 1; i <= 10; i++ { + if err := ct.Write(cstream, nil, buf, &opts); err != nil { + t.Fatalf("Error on client while writing message: %v", err) + } + if _, err := cstream.Read(header); err != nil { + t.Fatalf("Error on client while reading data frame header: %v", err) + } + sz := binary.BigEndian.Uint32(header[1:]) + recvMsg := make([]byte, int(sz)) + if _, err := cstream.Read(recvMsg); err != nil { + t.Fatalf("Error on client while reading data: %v", err) + } + if len(recvMsg) != len(msg) { + t.Fatalf("Length of message received by client: %v, want: %v", len(recvMsg), len(msg)) + } + } + var sstream *Stream + st.mu.Lock() + for _, v := range st.activeStreams { + sstream = v + } + st.mu.Unlock() + + waitWhileTrue(t, func() (bool, error) { + // Check that pendingData and delta on flow control windows on both sides are 0. + cstream.fc.mu.Lock() + if cstream.fc.delta != 0 { + cstream.fc.mu.Unlock() + return true, fmt.Errorf("delta on flow control window of client stream is non-zero") + } + if cstream.fc.pendingData != 0 { + cstream.fc.mu.Unlock() + return true, fmt.Errorf("pendingData on flow control window of client stream is non-zero") + } + cstream.fc.mu.Unlock() + sstream.fc.mu.Lock() + if sstream.fc.delta != 0 { + sstream.fc.mu.Unlock() + return true, fmt.Errorf("delta on flow control window of server stream is non-zero") + } + if sstream.fc.pendingData != 0 { + sstream.fc.mu.Unlock() + return true, fmt.Errorf("pendingData on flow control window of sercer stream is non-zero") + } + sstream.fc.mu.Unlock() + ct.fc.mu.Lock() + if ct.fc.delta != 0 { + ct.fc.mu.Unlock() + return true, fmt.Errorf("delta on flow control window of client transport is non-zero") + } + if ct.fc.pendingData != 0 { + ct.fc.mu.Unlock() + return true, fmt.Errorf("pendingData on flow control window of client transport is non-zero") + } + ct.fc.mu.Unlock() + st.fc.mu.Lock() + if st.fc.delta != 0 { + st.fc.mu.Unlock() + return true, fmt.Errorf("delta on flow control window of server transport is non-zero") + } + if st.fc.pendingData != 0 { + st.fc.mu.Unlock() + return true, fmt.Errorf("pendingData on flow control window of server transport is non-zero") + } + st.fc.mu.Unlock() + + // Check flow conrtrol window on client stream is equal to out flow on server stream. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + serverStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, sstream.sendQuotaPool.acquire()) + cancel() + if err != nil { + return true, fmt.Errorf("error while acquiring server stream send quota. 
Err: %v", err) + } + sstream.sendQuotaPool.add(serverStreamSendQuota) + cstream.fc.mu.Lock() + clientEst := cstream.fc.limit - cstream.fc.pendingUpdate + cstream.fc.mu.Unlock() + if uint32(serverStreamSendQuota) != clientEst { + return true, fmt.Errorf("server stream outflow: %v, estimated by client: %v", serverStreamSendQuota, clientEst) + } + + // Check flow control window on server stream is equal to out flow on client stream. + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + clientStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, cstream.sendQuotaPool.acquire()) + cancel() + if err != nil { + return true, fmt.Errorf("error while acquiring client stream send quota. Err: %v", err) + } + cstream.sendQuotaPool.add(clientStreamSendQuota) + sstream.fc.mu.Lock() + serverEst := sstream.fc.limit - sstream.fc.pendingUpdate + sstream.fc.mu.Unlock() + if uint32(clientStreamSendQuota) != serverEst { + return true, fmt.Errorf("client stream outflow: %v. estimated by server: %v", clientStreamSendQuota, serverEst) + } + + // Check flow control window on client transport is equal to out flow of server transport. + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + serverTrSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire()) + cancel() + if err != nil { + return true, fmt.Errorf("error while acquring server transport send quota. Err: %v", err) + } + st.sendQuotaPool.add(serverTrSendQuota) + ct.fc.mu.Lock() + clientEst = ct.fc.limit - ct.fc.pendingUpdate + ct.fc.mu.Unlock() + if uint32(serverTrSendQuota) != clientEst { + return true, fmt.Errorf("server transport outflow: %v, estimated by client: %v", serverTrSendQuota, clientEst) + } + + // Check flow control window on server transport is equal to out flow of client transport. + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + clientTrSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire()) + cancel() + if err != nil { + return true, fmt.Errorf("error while acquiring client transport send quota. Err: %v", err) + } + ct.sendQuotaPool.add(clientTrSendQuota) + st.fc.mu.Lock() + serverEst = st.fc.limit - st.fc.pendingUpdate + st.fc.mu.Unlock() + if uint32(clientTrSendQuota) != serverEst { + return true, fmt.Errorf("client transport outflow: %v, estimated by client: %v", clientTrSendQuota, serverEst) + } + + return false, nil + }) + +} + +func waitWhileTrue(t *testing.T, condition func() (bool, error)) { + var ( + wait bool + err error + ) + timer := time.NewTimer(time.Second * 5) + for { + wait, err = condition() + if wait { + select { + case <-timer.C: + t.Fatalf(err.Error()) + default: + time.Sleep(50 * time.Millisecond) + continue + } + } + if !timer.Stop() { + <-timer.C + } + break + } +} + +// A function of type writeHeaders writes out +// http status with the given stream ID using the given framer. 
+type writeHeaders func(*http2.Framer, uint32, int) error + +func writeOneHeader(framer *http2.Framer, sid uint32, httpStatus int) error { + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + henc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(httpStatus)}) + return framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) +} + +func writeTwoHeaders(framer *http2.Framer, sid uint32, httpStatus int) error { + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + henc.WriteField(hpack.HeaderField{ + Name: ":status", + Value: fmt.Sprint(http.StatusOK), + }) + if err := framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndHeaders: true, + }); err != nil { + return err + } + buf.Reset() + henc.WriteField(hpack.HeaderField{ + Name: ":status", + Value: fmt.Sprint(httpStatus), + }) + return framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: sid, + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) +} + +type httpServer struct { + conn net.Conn + httpStatus int + wh writeHeaders +} + +func (s *httpServer) start(t *testing.T, lis net.Listener) { + // Launch an HTTP server to send back header with httpStatus. + go func() { + var err error + s.conn, err = lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + defer s.conn.Close() + // Read preface sent by client. + if _, err = io.ReadFull(s.conn, make([]byte, len(http2.ClientPreface))); err != nil { + t.Errorf("Error at server-side while reading preface from cleint. Err: %v", err) + return + } + reader := bufio.NewReaderSize(s.conn, defaultWriteBufSize) + writer := bufio.NewWriterSize(s.conn, defaultReadBufSize) + framer := http2.NewFramer(writer, reader) + if err = framer.WriteSettingsAck(); err != nil { + t.Errorf("Error at server-side while sending Settings ack. Err: %v", err) + return + } + var sid uint32 + // Read frames until a header is received. + for { + frame, err := framer.ReadFrame() + if err != nil { + t.Errorf("Error at server-side while reading frame. Err: %v", err) + return + } + if hframe, ok := frame.(*http2.HeadersFrame); ok { + sid = hframe.Header().StreamID + break + } + } + if err = s.wh(framer, sid, s.httpStatus); err != nil { + t.Errorf("Error at server-side while writing headers. Err: %v", err) + return + } + writer.Flush() + }() +} + +func (s *httpServer) cleanUp() { + if s.conn != nil { + s.conn.Close() + } +} + +func setUpHTTPStatusTest(t *testing.T, httpStatus int, wh writeHeaders) (stream *Stream, cleanUp func()) { + var ( + err error + lis net.Listener + server *httpServer + client ClientTransport + ) + cleanUp = func() { + if lis != nil { + lis.Close() + } + if server != nil { + server.cleanUp() + } + if client != nil { + client.Close() + } + } + defer func() { + if err != nil { + cleanUp() + } + }() + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. Err: %v", err) + } + server = &httpServer{ + httpStatus: httpStatus, + wh: wh, + } + server.start(t, lis) + client, err = newHTTP2Client(context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{}, 2*time.Second) + if err != nil { + t.Fatalf("Error creating client. Err: %v", err) + } + stream, err = client.NewStream(context.Background(), &CallHdr{Method: "bogus/method", Flush: true}) + if err != nil { + t.Fatalf("Error creating stream at client-side. 
Err: %v", err) + } + return +} + +func TestHTTPToGRPCStatusMapping(t *testing.T) { + for k := range httpStatusConvTab { + testHTTPToGRPCStatusMapping(t, k, writeOneHeader) + } +} + +func testHTTPToGRPCStatusMapping(t *testing.T, httpStatus int, wh writeHeaders) { + stream, cleanUp := setUpHTTPStatusTest(t, httpStatus, wh) + defer cleanUp() + want := httpStatusConvTab[httpStatus] + buf := make([]byte, 8) + _, err := stream.Read(buf) + if err == nil { + t.Fatalf("Stream.Read(_) unexpectedly returned no error. Expected stream error with code %v", want) + } + serr, ok := err.(StreamError) + if !ok { + t.Fatalf("err.(Type) = %T, want StreamError", err) + } + if want != serr.Code { + t.Fatalf("Want error code: %v, got: %v", want, serr.Code) + } +} + +func TestHTTPStatusOKAndMissingGRPCStatus(t *testing.T) { + stream, cleanUp := setUpHTTPStatusTest(t, http.StatusOK, writeOneHeader) + defer cleanUp() + buf := make([]byte, 8) + _, err := stream.Read(buf) + if err != io.EOF { + t.Fatalf("stream.Read(_) = _, %v, want _, io.EOF", err) + } + want := codes.Unknown + stream.mu.Lock() + defer stream.mu.Unlock() + if stream.status.Code() != want { + t.Fatalf("Status code of stream: %v, want: %v", stream.status.Code(), want) + } +} + +func TestHTTPStatusNottOKAndMissingGRPCStatusInSecondHeader(t *testing.T) { + testHTTPToGRPCStatusMapping(t, http.StatusUnauthorized, writeTwoHeaders) +} + +// If any error occurs on a call to Stream.Read, future calls +// should continue to return that same error. +func TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) { + testRecvBuffer := newRecvBuffer() + s := &Stream{ + ctx: context.Background(), + goAway: make(chan struct{}), + buf: testRecvBuffer, + requestRead: func(int) {}, + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(int) {}, + } + testData := make([]byte, 1) + testData[0] = 5 + testErr := errors.New("test error") + s.write(recvMsg{data: testData, err: testErr}) + + inBuf := make([]byte, 1) + actualCount, actualErr := s.Read(inBuf) + if actualCount != 0 { + t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount) + } + if actualErr.Error() != testErr.Error() { + t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) + } + + s.write(recvMsg{data: testData, err: nil}) + s.write(recvMsg{data: testData, err: errors.New("different error from first")}) + + for i := 0; i < 2; i++ { + inBuf := make([]byte, 1) + actualCount, actualErr := s.Read(inBuf) + if actualCount != 0 { + t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount) + } + if actualErr.Error() != testErr.Error() { + t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) + } + } +} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 000000000000..d006a426347c --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. 
+if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/golang/protobuf/protoc-gen-go \ + golang.org/x/tools/cmd/stringer + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... 
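The transport.go hunks in this patch replace the old decoder-based Stream.Read with a requestRead call plus a transportReader that remembers the first error seen from the wire, so every later read fails fast without touching the transport again (TestReadGivesSameErrorAfterAnyErrorOccurs above exercises exactly this). Below is a minimal standalone sketch of that sticky-error pattern; the stickyErrReader name is illustrative only and not part of the gRPC API.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// stickyErrReader caches the first error returned by the wrapped reader and
// returns it on every subsequent Read, mirroring the behaviour of the new
// transportReader/Stream.Read pair in the vendored transport package.
type stickyErrReader struct {
	r  io.Reader
	er error
}

func (s *stickyErrReader) Read(p []byte) (int, error) {
	if s.er != nil {
		// Fail fast: do not touch the underlying reader again.
		return 0, s.er
	}
	n, err := s.r.Read(p)
	if err != nil {
		s.er = err
	}
	return n, err
}

func main() {
	r := &stickyErrReader{r: bytes.NewReader([]byte("ok"))}
	buf := make([]byte, 2)

	n, err := r.Read(buf) // reads "ok"
	fmt.Println(n, err)   // 2 <nil>

	_, err = r.Read(buf) // underlying reader is exhausted
	fmt.Println(err)     // EOF

	_, err = r.Read(buf)       // cached error is returned again
	fmt.Println(err == io.EOF) // true
}

Because Stream.Read now performs the full read itself over trReader, the tests above call s.Read(p) directly where they previously used io.ReadFull(s, p).
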
From 229bfd7fc32084846ccb608a10df8743bb022593 Mon Sep 17 00:00:00 2001 From: Michal Fojtik Date: Tue, 20 Feb 2018 11:04:27 +0100 Subject: [PATCH 2/4] UPSTREAM: 57480: Fix build and test errors from etcd 3.2.13 upgrade --- .../staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD | 1 + .../src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | 2 +- vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go | 1 + vendor/k8s.io/kubernetes/test/integration/scale/BUILD | 1 + vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go | 1 + 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD index ac48442e50d8..5d495404c6d0 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD @@ -13,6 +13,7 @@ go_library( "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 01586fb98d47..96d21b5812e1 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -155,6 +155,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } + m.AuthToken = "simple" } else { cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -169,7 +170,6 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } - m.AuthToken = "simple" clusterStr := fmt.Sprintf("%s=http://%s", name, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go b/vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go index 9176ff7a0d72..8c40dc7797eb 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/services/etcd.go @@ -77,6 +77,7 @@ func NewEtcd(dataDir string) *EtcdServer { MaxWALFiles: maxWALFiles, TickMs: tickMs, ElectionTicks: electionTicks, + AuthToken: "simple", } return &EtcdServer{ diff --git a/vendor/k8s.io/kubernetes/test/integration/scale/BUILD b/vendor/k8s.io/kubernetes/test/integration/scale/BUILD index 120e556ce4bd..5fd7eb7c0938 100644 --- a/vendor/k8s.io/kubernetes/test/integration/scale/BUILD +++ b/vendor/k8s.io/kubernetes/test/integration/scale/BUILD @@ -14,6 +14,7 @@ go_test( deps = [ "//cmd/kube-apiserver/app/testing:go_default_library", "//test/integration/framework:go_default_library", + "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", 
"//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go b/vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go index a40093571c1e..fe92420ec1ec 100644 --- a/vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go +++ b/vendor/k8s.io/kubernetes/test/integration/scale/scale_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + _ "github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init. "github.com/coreos/pkg/capnslog" appsv1beta2 "k8s.io/api/apps/v1beta2" From 3050e6ca465babbe0734bec72c1006ab68f14cf7 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 23 Feb 2018 13:51:31 +0100 Subject: [PATCH 3/4] UPSTREAM: 60299: apiserver: fix testing etcd config for etcd 3.2.16 --- .../src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 96d21b5812e1..1a8515aca13c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -155,7 +155,6 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } - m.AuthToken = "simple" } else { cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -165,6 +164,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer } } + m.AuthToken = "simple" m.Name = name m.DataDir, err = ioutil.TempDir(baseDir, "etcd") if err != nil { From 373bdee91a0ae75efb45185f0375964d651615b2 Mon Sep 17 00:00:00 2001 From: Michal Fojtik Date: Mon, 26 Feb 2018 20:34:11 +0100 Subject: [PATCH 4/4] UPSTREAM: 60430: don't use storage cache during apiserver unit test --- vendor/k8s.io/kubernetes/pkg/master/master_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/pkg/master/master_test.go b/vendor/k8s.io/kubernetes/pkg/master/master_test.go index 506b81f9f6eb..043c6b9d3e68 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/master_test.go +++ b/vendor/k8s.io/kubernetes/pkg/master/master_test.go @@ -94,7 +94,10 @@ func setUp(t *testing.T) (*etcdtesting.EtcdTestServer, Config, informers.SharedI resourceEncoding.SetVersionEncoding(certificates.GroupName, *testapi.Certificates.GroupVersion(), schema.GroupVersion{Group: certificates.GroupName, Version: runtime.APIVersionInternal}) storageFactory := serverstorage.NewDefaultStorageFactory(*storageConfig, testapi.StorageMediaType(), legacyscheme.Codecs, resourceEncoding, DefaultAPIResourceConfigSource(), nil) - err := options.NewEtcdOptions(storageConfig).ApplyWithStorageFactoryTo(storageFactory, config.GenericConfig) + etcdOptions := options.NewEtcdOptions(storageConfig) + // unit tests don't need watch cache and it leaks lots of goroutines with etcd testing functions during unit tests + etcdOptions.EnableWatchCache = false + err := etcdOptions.ApplyWithStorageFactoryTo(storageFactory, config.GenericConfig) if err != nil { t.Fatal(err) }