
Merge pull request #8 from rumpl/feat-client-connection

Configure client connection backoff
Guillaume LOURS 5 years ago
parent
commit
2a99ecdeec
65 changed files with 2908 additions and 5425 deletions
  1. +15 -1      client/client.go
  2. +4 -3       cmd/example.go
  3. +2 -2       example/backend/main.go
  4. +1 -1       vendor.conf
  5. +37 -0      vendor/github.com/gogo/protobuf/gogoproto/Makefile
  6. +45 -0      vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
  7. +0 -118     vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
  8. +0 -2865    vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
  9. +0 -752     vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
  10. +0 -390    vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
  11. +16 -0     vendor/google.golang.org/grpc/README.md
  12. +70 -0     vendor/google.golang.org/grpc/attributes/attributes.go
  13. +20 -0     vendor/google.golang.org/grpc/backoff.go
  14. +52 -0     vendor/google.golang.org/grpc/backoff/backoff.go
  15. +105 -15   vendor/google.golang.org/grpc/balancer/balancer.go
  16. +132 -30   vendor/google.golang.org/grpc/balancer/base/balancer.go
  17. +29 -0     vendor/google.golang.org/grpc/balancer/base/base.go
  18. +8 -10     vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
  19. +60 -107   vendor/google.golang.org/grpc/balancer_conn_wrappers.go
  20. +17 -17    vendor/google.golang.org/grpc/balancer_v1_wrapper.go
  21. +235 -149  vendor/google.golang.org/grpc/clientconn.go
  22. +100 -181  vendor/google.golang.org/grpc/credentials/credentials.go
  23. +0 -0      vendor/google.golang.org/grpc/credentials/go12.go
  24. +225 -0    vendor/google.golang.org/grpc/credentials/tls.go
  25. +61 -21    vendor/google.golang.org/grpc/dialoptions.go
  26. +4 -0      vendor/google.golang.org/grpc/encoding/encoding.go
  27. +6 -9      vendor/google.golang.org/grpc/go.mod
  28. +25 -19    vendor/google.golang.org/grpc/grpclog/grpclog.go
  29. +3 -1      vendor/google.golang.org/grpc/grpclog/logger.go
  30. +20 -1     vendor/google.golang.org/grpc/grpclog/loggerv2.go
  31. +21 -21    vendor/google.golang.org/grpc/health/client.go
  32. +60 -44    vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
  33. +5 -5      vendor/google.golang.org/grpc/health/server.go
  34. +11 -16    vendor/google.golang.org/grpc/internal/backoff/backoff.go
  35. +6 -6      vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
  36. +2 -2      vendor/google.golang.org/grpc/internal/binarylog/env_config.go
  37. +1 -1      vendor/google.golang.org/grpc/internal/binarylog/sink.go
  38. +85 -0     vendor/google.golang.org/grpc/internal/buffer/unbounded.go
  39. +17 -5     vendor/google.golang.org/grpc/internal/channelz/funcs.go
  40. +100 -0    vendor/google.golang.org/grpc/internal/channelz/logging.go
  41. +5 -2      vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
  42. +118 -0    vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
  43. +63 -0     vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
  44. +55 -0     vendor/google.golang.org/grpc/internal/grpcutil/target.go
  45. +8 -7      vendor/google.golang.org/grpc/internal/internal.go
  46. +99 -115   vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
  47. +33 -0     vendor/google.golang.org/grpc/internal/resolver/dns/go113.go
  48. +2 -2      vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
  49. +4 -8      vendor/google.golang.org/grpc/internal/transport/controlbuf.go
  50. +7 -3      vendor/google.golang.org/grpc/internal/transport/handler_server.go
  51. +128 -80   vendor/google.golang.org/grpc/internal/transport/http2_client.go
  52. +91 -61    vendor/google.golang.org/grpc/internal/transport/http2_server.go
  53. +31 -39    vendor/google.golang.org/grpc/internal/transport/transport.go
  54. +102 -70   vendor/google.golang.org/grpc/picker_wrapper.go
  55. +65 -24    vendor/google.golang.org/grpc/pickfirst.go
  56. +75 -15    vendor/google.golang.org/grpc/resolver/resolver.go
  57. +126 -72   vendor/google.golang.org/grpc/resolver_conn_wrapper.go
  58. +50 -24    vendor/google.golang.org/grpc/rpc_util.go
  59. +202 -72   vendor/google.golang.org/grpc/server.go
  60. +22 -17    vendor/google.golang.org/grpc/service_config.go
  61. +7 -14     vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
  62. +11 -0     vendor/google.golang.org/grpc/stats/stats.go
  63. +3 -4      vendor/google.golang.org/grpc/stream.go
  64. +0 -3      vendor/google.golang.org/grpc/trace.go
  65. +1 -1      vendor/google.golang.org/grpc/version.go

+ 15 - 1
client/client.go

@@ -36,6 +36,7 @@ import (
 
 	v1 "github.com/docker/api/backend/v1"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
 )
 
 // NewContext is a context that is canceled when a signal is
@@ -53,7 +54,20 @@ func NewContext() (context.Context, func()) {
 
 // New returns a GRPC client
 func New(address string, timeout time.Duration) (*Client, error) {
-	conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(timeout))
+	backoffConfig := backoff.DefaultConfig
+	backoffConfig.MaxDelay = 3 * time.Second
+	backoffConfig.BaseDelay = 10 * time.Millisecond
+	connParams := grpc.ConnectParams{
+		Backoff: backoffConfig,
+	}
+	opts := []grpc.DialOption{
+		grpc.WithInsecure(),
+		grpc.WithConnectParams(connParams),
+		grpc.WithBlock(),
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	conn, err := grpc.DialContext(ctx, address, opts...)
 	if err != nil {
 		return nil, err
 	}
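
For reference, a minimal standalone sketch of the dial pattern introduced above: it tunes the exponential reconnect backoff via grpc.WithConnectParams and bounds the blocking dial with a context timeout, exactly as client.New now does. The target address and the log-based error handling are illustrative assumptions, not part of this change.

    package main

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/backoff"
    )

    func main() {
        // Start from the library defaults and tighten the reconnect pacing.
        cfg := backoff.DefaultConfig
        cfg.BaseDelay = 10 * time.Millisecond // first retry after ~10ms
        cfg.MaxDelay = 3 * time.Second        // cap the exponential growth

        // With grpc.WithBlock, the context timeout bounds the total time
        // spent retrying with the configured backoff.
        ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
        defer cancel()

        conn, err := grpc.DialContext(ctx, "localhost:7654", // illustrative address
            grpc.WithInsecure(),
            grpc.WithConnectParams(grpc.ConnectParams{Backoff: cfg}),
            grpc.WithBlock(),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }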

+ 4 - 3
cmd/example.go

@@ -71,7 +71,7 @@ var exampleCommand = cli.Command{
 // factor out this into a context store package
 func current(ctx context.Context) context.Context {
 	// test backend address
-	return context.WithValue(ctx, backendAddressKey{}, "127.0.0.1:7654")
+	return context.WithValue(ctx, backendAddressKey{}, "/tmp/backend.sock")
 }
 
 func connect(ctx context.Context) (*client.Client, error) {
@@ -79,7 +79,7 @@ func connect(ctx context.Context) (*client.Client, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "no backend address")
 	}
-	c, err := client.New(address, 500*time.Millisecond)
+	c, err := client.New("unix://"+address, 500*time.Millisecond)
 	if err != nil {
 		if err != context.DeadlineExceeded {
 			return nil, errors.Wrap(err, "connect to backend")
@@ -91,7 +91,8 @@ func connect(ctx context.Context) (*client.Client, error) {
 		if err := cmd.Start(); err != nil {
 			return nil, errors.Wrap(err, "start backend")
 		}
-		return client.New(address, 2*time.Second)
+		cl, e := client.New("unix://"+address, 10*time.Second)
+		return cl, e
 	}
 	return c, nil
 }
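
The connect flow above first tries a short blocking dial against the backend socket and, when the deadline expires, starts the backend process and retries with a longer timeout. A hedged sketch of that pattern follows; the package name, the startBackend callback, and the module path github.com/docker/api are assumptions made for the example.

    package example

    import (
        "context"
        "time"

        "github.com/docker/api/client"
        "github.com/pkg/errors"
    )

    // connectOrLaunch mirrors connect() above: try a quick dial, and if the
    // deadline expires, launch the backend (startBackend is a hypothetical
    // helper) and retry with a longer timeout so the client's backoff can
    // pace the reconnect attempts while the backend starts up.
    func connectOrLaunch(address string, startBackend func() error) (*client.Client, error) {
        c, err := client.New("unix://"+address, 500*time.Millisecond)
        if err == nil {
            return c, nil
        }
        if err != context.DeadlineExceeded {
            return nil, errors.Wrap(err, "connect to backend")
        }
        if err := startBackend(); err != nil {
            return nil, errors.Wrap(err, "start backend")
        }
        return client.New("unix://"+address, 10*time.Second)
    }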

+ 2 - 2
example/backend/main.go

@@ -74,9 +74,9 @@ func main() {
 		s := server.New()
 
 		// listen on a socket to accept connects
-		l, err := net.Listen("tcp", clix.GlobalString("address"))
+		l, err := net.Listen("unix", clix.GlobalString("address"))
 		if err != nil {
-			return errors.Wrap(err, "listen tcp")
+			return errors.Wrap(err, "listen unix socket")
 		}
 		defer l.Close()
 

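On the server side the backend now listens on a unix domain socket, which the client reaches via the unix:// target scheme shown earlier. A minimal sketch of that pairing, assuming a hypothetical socket path and a bare grpc.Server (the real backend wires in its services through server.New):

    package main

    import (
        "log"
        "net"
        "os"

        "google.golang.org/grpc"
    )

    func main() {
        const socket = "/tmp/backend.sock" // hypothetical path; the client would dial "unix://" + socket

        // Remove a stale socket file left by a previous run, then listen on it.
        _ = os.Remove(socket)
        l, err := net.Listen("unix", socket)
        if err != nil {
            log.Fatalf("listen unix socket: %v", err)
        }
        defer l.Close()

        s := grpc.NewServer()
        // Register backend services here before serving.
        if err := s.Serve(l); err != nil {
            log.Fatalf("serve: %v", err)
        }
    }
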
+ 1 - 1
vendor.conf

@@ -5,7 +5,7 @@ github.com/pkg/errors 7f95ac13edff643b8ce5398b6ccab125f8a20c1a
 github.com/sirupsen/logrus 67a7fdcf741f4d5cee82cb9800994ccfd4393ad0
 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
 golang.org/x/sys cf1e2d57716972ea102acf35e82471062c8906a1
-google.golang.org/grpc 39e8a7b072a67ca2a75f57fa2e0d50995f5b22f6
+google.golang.org/grpc v1.28.1
 github.com/golang/protobuf ed6926b37a637426117ccab59282c3839528a700
 google.golang.org/genproto 83cc0476cb11ea0da33dacd4c6354ab192de6fe6
 golang.org/x/net d06c31c94caefa2de32f9a8bcc857498fd9c1232

+ 37 - 0
vendor/github.com/gogo/protobuf/gogoproto/Makefile

@@ -0,0 +1,37 @@
+# Protocol Buffers for Go with Gadgets
+#
+# Copyright (c) 2013, The GoGo Authors. All rights reserved.
+# http://github.com/gogo/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
+	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto
+
+restore:
+	cp gogo.pb.golden gogo.pb.go
+
+preserve:
+	cp gogo.pb.go gogo.pb.golden

+ 45 - 0
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden

@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go.
+// source: gogo.proto
+// DO NOT EDIT!
+
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import json "encoding/json"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference proto, json, and math imports to suppress error if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+var E_Nullable = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51235,
+	Name:          "gogoproto.nullable",
+	Tag:           "varint,51235,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51236,
+	Name:          "gogoproto.embed",
+	Tag:           "varint,51236,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         51237,
+	Name:          "gogoproto.customtype",
+	Tag:           "bytes,51237,opt,name=customtype",
+}
+
+func init() {
+	proto.RegisterExtension(E_Nullable)
+	proto.RegisterExtension(E_Embed)
+	proto.RegisterExtension(E_Customtype)
+}

+ 0 - 118
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go

@@ -1,118 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package descriptor provides functions for obtaining protocol buffer
-// descriptors for generated Go types.
-//
-// These functions cannot go in package proto because they depend on the
-// generated protobuf descriptor messages, which themselves depend on proto.
-package descriptor
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io/ioutil"
-
-	"github.com/gogo/protobuf/proto"
-)
-
-// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
-func extractFile(gz []byte) (*FileDescriptorProto, error) {
-	r, err := gzip.NewReader(bytes.NewReader(gz))
-	if err != nil {
-		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
-	}
-	defer r.Close()
-
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
-	}
-
-	fd := new(FileDescriptorProto)
-	if err := proto.Unmarshal(b, fd); err != nil {
-		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
-	}
-
-	return fd, nil
-}
-
-// Message is a proto.Message with a method to return its descriptor.
-//
-// Message types generated by the protocol compiler always satisfy
-// the Message interface.
-type Message interface {
-	proto.Message
-	Descriptor() ([]byte, []int)
-}
-
-// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
-// describing the given message.
-func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
-	gz, path := msg.Descriptor()
-	fd, err := extractFile(gz)
-	if err != nil {
-		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
-	}
-
-	md = fd.MessageType[path[0]]
-	for _, i := range path[1:] {
-		md = md.NestedType[i]
-	}
-	return fd, md
-}
-
-// Is this field a scalar numeric type?
-func (field *FieldDescriptorProto) IsScalar() bool {
-	if field.Type == nil {
-		return false
-	}
-	switch *field.Type {
-	case FieldDescriptorProto_TYPE_DOUBLE,
-		FieldDescriptorProto_TYPE_FLOAT,
-		FieldDescriptorProto_TYPE_INT64,
-		FieldDescriptorProto_TYPE_UINT64,
-		FieldDescriptorProto_TYPE_INT32,
-		FieldDescriptorProto_TYPE_FIXED64,
-		FieldDescriptorProto_TYPE_FIXED32,
-		FieldDescriptorProto_TYPE_BOOL,
-		FieldDescriptorProto_TYPE_UINT32,
-		FieldDescriptorProto_TYPE_ENUM,
-		FieldDescriptorProto_TYPE_SFIXED32,
-		FieldDescriptorProto_TYPE_SFIXED64,
-		FieldDescriptorProto_TYPE_SINT32,
-		FieldDescriptorProto_TYPE_SINT64:
-		return true
-	default:
-		return false
-	}
-}

+ 0 - 2865
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go

@@ -1,2865 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: descriptor.proto
-
-package descriptor
-
-import (
-	fmt "fmt"
-	proto "github.com/gogo/protobuf/proto"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-type FieldDescriptorProto_Type int32
-
-const (
-	// 0 is reserved for errors.
-	// Order is weird for historical reasons.
-	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
-	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
-	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
-	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
-	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
-	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
-	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
-	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
-	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
-	// Tag-delimited aggregate.
-	// Group type is deprecated and not supported in proto3. However, Proto3
-	// implementations should still be able to parse the group wire format and
-	// treat group fields as unknown fields.
-	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
-	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
-	// New in version 2.
-	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
-	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
-	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
-	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
-	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
-	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
-	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
-)
-
-var FieldDescriptorProto_Type_name = map[int32]string{
-	1:  "TYPE_DOUBLE",
-	2:  "TYPE_FLOAT",
-	3:  "TYPE_INT64",
-	4:  "TYPE_UINT64",
-	5:  "TYPE_INT32",
-	6:  "TYPE_FIXED64",
-	7:  "TYPE_FIXED32",
-	8:  "TYPE_BOOL",
-	9:  "TYPE_STRING",
-	10: "TYPE_GROUP",
-	11: "TYPE_MESSAGE",
-	12: "TYPE_BYTES",
-	13: "TYPE_UINT32",
-	14: "TYPE_ENUM",
-	15: "TYPE_SFIXED32",
-	16: "TYPE_SFIXED64",
-	17: "TYPE_SINT32",
-	18: "TYPE_SINT64",
-}
-
-var FieldDescriptorProto_Type_value = map[string]int32{
-	"TYPE_DOUBLE":   1,
-	"TYPE_FLOAT":    2,
-	"TYPE_INT64":    3,
-	"TYPE_UINT64":   4,
-	"TYPE_INT32":    5,
-	"TYPE_FIXED64":  6,
-	"TYPE_FIXED32":  7,
-	"TYPE_BOOL":     8,
-	"TYPE_STRING":   9,
-	"TYPE_GROUP":    10,
-	"TYPE_MESSAGE":  11,
-	"TYPE_BYTES":    12,
-	"TYPE_UINT32":   13,
-	"TYPE_ENUM":     14,
-	"TYPE_SFIXED32": 15,
-	"TYPE_SFIXED64": 16,
-	"TYPE_SINT32":   17,
-	"TYPE_SINT64":   18,
-}
-
-func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
-	p := new(FieldDescriptorProto_Type)
-	*p = x
-	return p
-}
-
-func (x FieldDescriptorProto_Type) String() string {
-	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
-}
-
-func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
-	if err != nil {
-		return err
-	}
-	*x = FieldDescriptorProto_Type(value)
-	return nil
-}
-
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{4, 0}
-}
-
-type FieldDescriptorProto_Label int32
-
-const (
-	// 0 is reserved for errors
-	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
-	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
-	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
-)
-
-var FieldDescriptorProto_Label_name = map[int32]string{
-	1: "LABEL_OPTIONAL",
-	2: "LABEL_REQUIRED",
-	3: "LABEL_REPEATED",
-}
-
-var FieldDescriptorProto_Label_value = map[string]int32{
-	"LABEL_OPTIONAL": 1,
-	"LABEL_REQUIRED": 2,
-	"LABEL_REPEATED": 3,
-}
-
-func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
-	p := new(FieldDescriptorProto_Label)
-	*p = x
-	return p
-}
-
-func (x FieldDescriptorProto_Label) String() string {
-	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
-}
-
-func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
-	if err != nil {
-		return err
-	}
-	*x = FieldDescriptorProto_Label(value)
-	return nil
-}
-
-func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{4, 1}
-}
-
-// Generated classes can be optimized for speed or code size.
-type FileOptions_OptimizeMode int32
-
-const (
-	FileOptions_SPEED FileOptions_OptimizeMode = 1
-	// etc.
-	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
-	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
-)
-
-var FileOptions_OptimizeMode_name = map[int32]string{
-	1: "SPEED",
-	2: "CODE_SIZE",
-	3: "LITE_RUNTIME",
-}
-
-var FileOptions_OptimizeMode_value = map[string]int32{
-	"SPEED":        1,
-	"CODE_SIZE":    2,
-	"LITE_RUNTIME": 3,
-}
-
-func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
-	p := new(FileOptions_OptimizeMode)
-	*p = x
-	return p
-}
-
-func (x FileOptions_OptimizeMode) String() string {
-	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
-}
-
-func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
-	if err != nil {
-		return err
-	}
-	*x = FileOptions_OptimizeMode(value)
-	return nil
-}
-
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{10, 0}
-}
-
-type FieldOptions_CType int32
-
-const (
-	// Default mode.
-	FieldOptions_STRING       FieldOptions_CType = 0
-	FieldOptions_CORD         FieldOptions_CType = 1
-	FieldOptions_STRING_PIECE FieldOptions_CType = 2
-)
-
-var FieldOptions_CType_name = map[int32]string{
-	0: "STRING",
-	1: "CORD",
-	2: "STRING_PIECE",
-}
-
-var FieldOptions_CType_value = map[string]int32{
-	"STRING":       0,
-	"CORD":         1,
-	"STRING_PIECE": 2,
-}
-
-func (x FieldOptions_CType) Enum() *FieldOptions_CType {
-	p := new(FieldOptions_CType)
-	*p = x
-	return p
-}
-
-func (x FieldOptions_CType) String() string {
-	return proto.EnumName(FieldOptions_CType_name, int32(x))
-}
-
-func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
-	if err != nil {
-		return err
-	}
-	*x = FieldOptions_CType(value)
-	return nil
-}
-
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{12, 0}
-}
-
-type FieldOptions_JSType int32
-
-const (
-	// Use the default type.
-	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
-	// Use JavaScript strings.
-	FieldOptions_JS_STRING FieldOptions_JSType = 1
-	// Use JavaScript numbers.
-	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
-)
-
-var FieldOptions_JSType_name = map[int32]string{
-	0: "JS_NORMAL",
-	1: "JS_STRING",
-	2: "JS_NUMBER",
-}
-
-var FieldOptions_JSType_value = map[string]int32{
-	"JS_NORMAL": 0,
-	"JS_STRING": 1,
-	"JS_NUMBER": 2,
-}
-
-func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
-	p := new(FieldOptions_JSType)
-	*p = x
-	return p
-}
-
-func (x FieldOptions_JSType) String() string {
-	return proto.EnumName(FieldOptions_JSType_name, int32(x))
-}
-
-func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
-	if err != nil {
-		return err
-	}
-	*x = FieldOptions_JSType(value)
-	return nil
-}
-
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{12, 1}
-}
-
-// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
-// or neither? HTTP based RPC implementation may choose GET verb for safe
-// methods, and PUT verb for idempotent methods instead of the default POST.
-type MethodOptions_IdempotencyLevel int32
-
-const (
-	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
-	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
-	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
-)
-
-var MethodOptions_IdempotencyLevel_name = map[int32]string{
-	0: "IDEMPOTENCY_UNKNOWN",
-	1: "NO_SIDE_EFFECTS",
-	2: "IDEMPOTENT",
-}
-
-var MethodOptions_IdempotencyLevel_value = map[string]int32{
-	"IDEMPOTENCY_UNKNOWN": 0,
-	"NO_SIDE_EFFECTS":     1,
-	"IDEMPOTENT":          2,
-}
-
-func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
-	p := new(MethodOptions_IdempotencyLevel)
-	*p = x
-	return p
-}
-
-func (x MethodOptions_IdempotencyLevel) String() string {
-	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
-}
-
-func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
-	if err != nil {
-		return err
-	}
-	*x = MethodOptions_IdempotencyLevel(value)
-	return nil
-}
-
-func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{17, 0}
-}
-
-// The protocol compiler can output a FileDescriptorSet containing the .proto
-// files it parses.
-type FileDescriptorSet struct {
-	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
-func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorSet) ProtoMessage()    {}
-func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{0}
-}
-func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
-}
-func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
-}
-func (m *FileDescriptorSet) XXX_Size() int {
-	return xxx_messageInfo_FileDescriptorSet.Size(m)
-}
-func (m *FileDescriptorSet) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
-
-func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
-	if m != nil {
-		return m.File
-	}
-	return nil
-}
-
-// Describes a complete .proto file.
-type FileDescriptorProto struct {
-	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
-	// Names of files imported by this file.
-	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
-	// Indexes of the public imported files in the dependency list above.
-	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
-	// Indexes of the weak imported files in the dependency list.
-	// For Google-internal migration only. Do not use.
-	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
-	// All top-level definitions in this file.
-	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
-	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
-	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
-	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
-	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
-	// This field contains optional information about the original source code.
-	// You may safely remove this entire field without harming runtime
-	// functionality of the descriptors -- the information is needed only by
-	// development tools.
-	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
-	// The syntax of the proto file.
-	// The supported values are "proto2" and "proto3".
-	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
-func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorProto) ProtoMessage()    {}
-func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{1}
-}
-func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
-}
-func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
-}
-func (m *FileDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_FileDescriptorProto.Size(m)
-}
-func (m *FileDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
-
-func (m *FileDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *FileDescriptorProto) GetPackage() string {
-	if m != nil && m.Package != nil {
-		return *m.Package
-	}
-	return ""
-}
-
-func (m *FileDescriptorProto) GetDependency() []string {
-	if m != nil {
-		return m.Dependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetPublicDependency() []int32 {
-	if m != nil {
-		return m.PublicDependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetWeakDependency() []int32 {
-	if m != nil {
-		return m.WeakDependency
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
-	if m != nil {
-		return m.MessageType
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
-	if m != nil {
-		return m.EnumType
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
-	if m != nil {
-		return m.Service
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Extension
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetOptions() *FileOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
-	if m != nil {
-		return m.SourceCodeInfo
-	}
-	return nil
-}
-
-func (m *FileDescriptorProto) GetSyntax() string {
-	if m != nil && m.Syntax != nil {
-		return *m.Syntax
-	}
-	return ""
-}
-
-// Describes a message type.
-type DescriptorProto struct {
-	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
-	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
-	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
-	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
-	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
-	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
-	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
-	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
-	// Reserved field names, which may not be used by fields in the same message.
-	// A given name may only be reserved once.
-	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
-func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto) ProtoMessage()    {}
-func (*DescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{2}
-}
-func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
-}
-func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto.Merge(m, src)
-}
-func (m *DescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto.Size(m)
-}
-func (m *DescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
-
-func (m *DescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Field
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
-	if m != nil {
-		return m.Extension
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
-	if m != nil {
-		return m.NestedType
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
-	if m != nil {
-		return m.EnumType
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
-	if m != nil {
-		return m.ExtensionRange
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
-	if m != nil {
-		return m.OneofDecl
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetOptions() *MessageOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
-	if m != nil {
-		return m.ReservedRange
-	}
-	return nil
-}
-
-func (m *DescriptorProto) GetReservedName() []string {
-	if m != nil {
-		return m.ReservedName
-	}
-	return nil
-}
-
-type DescriptorProto_ExtensionRange struct {
-	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
-	XXX_unrecognized     []byte                 `json:"-"`
-	XXX_sizecache        int32                  `json:"-"`
-}
-
-func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
-func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
-func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{2, 0}
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
-}
-func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Range of reserved tag numbers. Reserved tag numbers may not be used by
-// fields or extension ranges in the same message. Reserved ranges may
-// not overlap.
-type DescriptorProto_ReservedRange struct {
-	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
-func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
-func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
-func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{2, 1}
-}
-func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
-}
-func (m *DescriptorProto_ReservedRange) XXX_Size() int {
-	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
-}
-func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
-
-func (m *DescriptorProto_ReservedRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-type ExtensionRangeOptions struct {
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
-func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRangeOptions) ProtoMessage()    {}
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{3}
-}
-
-var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_ExtensionRangeOptions
-}
-
-func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
-}
-func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
-}
-func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
-}
-func (m *ExtensionRangeOptions) XXX_Size() int {
-	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
-}
-func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
-
-func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-// Describes a field within a message.
-type FieldDescriptorProto struct {
-	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
-	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
-	// If type_name is set, this need not be set.  If both this and type_name
-	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
-	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
-	// For message and enum types, this is the name of the type.  If the name
-	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
-	// rules are used to find the type (i.e. first the nested types within this
-	// message are searched, then within the parent, on up to the root
-	// namespace).
-	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
-	// For extensions, this is the name of the type being extended.  It is
-	// resolved in the same manner as type_name.
-	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
-	// For numeric types, contains the original text representation of the value.
-	// For booleans, "true" or "false".
-	// For strings, contains the default text contents (not escaped in any way).
-	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
-	// TODO(kenton):  Base-64 encode?
-	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
-	// If set, gives the index of a oneof in the containing type's oneof_decl
-	// list.  This field is a member of that oneof.
-	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
-	// JSON name of this field. The value is set by protocol compiler. If the
-	// user has set a "json_name" option on this field, that option's value
-	// will be used. Otherwise, it's deduced from the field's name by converting
-	// it to camelCase.
-	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
-	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
-func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*FieldDescriptorProto) ProtoMessage()    {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{4}
-}
-func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
-}
-func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
-}
-func (m *FieldDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_FieldDescriptorProto.Size(m)
-}
-func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
-
-func (m *FieldDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetNumber() int32 {
-	if m != nil && m.Number != nil {
-		return *m.Number
-	}
-	return 0
-}
-
-func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
-	if m != nil && m.Label != nil {
-		return *m.Label
-	}
-	return FieldDescriptorProto_LABEL_OPTIONAL
-}
-
-func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
-	if m != nil && m.Type != nil {
-		return *m.Type
-	}
-	return FieldDescriptorProto_TYPE_DOUBLE
-}
-
-func (m *FieldDescriptorProto) GetTypeName() string {
-	if m != nil && m.TypeName != nil {
-		return *m.TypeName
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetExtendee() string {
-	if m != nil && m.Extendee != nil {
-		return *m.Extendee
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetDefaultValue() string {
-	if m != nil && m.DefaultValue != nil {
-		return *m.DefaultValue
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetOneofIndex() int32 {
-	if m != nil && m.OneofIndex != nil {
-		return *m.OneofIndex
-	}
-	return 0
-}
-
-func (m *FieldDescriptorProto) GetJsonName() string {
-	if m != nil && m.JsonName != nil {
-		return *m.JsonName
-	}
-	return ""
-}
-
-func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a oneof.
-type OneofDescriptorProto struct {
-	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
-func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*OneofDescriptorProto) ProtoMessage()    {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{5}
-}
-func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
-}
-func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
-}
-func (m *OneofDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_OneofDescriptorProto.Size(m)
-}
-func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
-
-func (m *OneofDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes an enum type.
-type EnumDescriptorProto struct {
-	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
-	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	// Range of reserved numeric values. Reserved numeric values may not be used
-	// by enum values in the same enum declaration. Reserved ranges may not
-	// overlap.
-	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
-	// Reserved enum value names, which may not be reused. A given name may only
-	// be reserved once.
-	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
-func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto) ProtoMessage()    {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{6}
-}
-func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
-}
-func (m *EnumDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_EnumDescriptorProto.Size(m)
-}
-func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
-	if m != nil {
-		return m.ReservedRange
-	}
-	return nil
-}
-
-func (m *EnumDescriptorProto) GetReservedName() []string {
-	if m != nil {
-		return m.ReservedName
-	}
-	return nil
-}
-
-// Range of reserved numeric values. Reserved values may not be used by
-// entries in the same enum. Reserved ranges may not overlap.
-//
-// Note that this is distinct from DescriptorProto.ReservedRange in that it
-// is inclusive such that it can appropriately represent the entire int32
-// domain.
-type EnumDescriptorProto_EnumReservedRange struct {
-	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
-func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{6, 0}
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
-	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
-}
-func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-// Describes a value within an enum.
-type EnumValueDescriptorProto struct {
-	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
-	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
-func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumValueDescriptorProto) ProtoMessage()    {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{7}
-}
-func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
-}
-func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
-}
-func (m *EnumValueDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
-}
-func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
-
-func (m *EnumValueDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *EnumValueDescriptorProto) GetNumber() int32 {
-	if m != nil && m.Number != nil {
-		return *m.Number
-	}
-	return 0
-}
-
-func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a service.
-type ServiceDescriptorProto struct {
-	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
-	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
-	XXX_unrecognized     []byte                   `json:"-"`
-	XXX_sizecache        int32                    `json:"-"`
-}
-
-func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
-func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*ServiceDescriptorProto) ProtoMessage()    {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{8}
-}
-func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
-}
-func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
-}
-func (m *ServiceDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
-}
-func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
-
-func (m *ServiceDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
-	if m != nil {
-		return m.Method
-	}
-	return nil
-}
-
-func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a method of a service.
-type MethodDescriptorProto struct {
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Input and output type names.  These are resolved in the same way as
-	// FieldDescriptorProto.type_name, but must refer to a message type.
-	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
-	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
-	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
-	// Identifies if client streams multiple client messages
-	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
-	// Identifies if server streams multiple server messages
-	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
-func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*MethodDescriptorProto) ProtoMessage()    {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{9}
-}
-func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
-}
-func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
-}
-func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
-}
-func (m *MethodDescriptorProto) XXX_Size() int {
-	return xxx_messageInfo_MethodDescriptorProto.Size(m)
-}
-func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
-	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
-
-const Default_MethodDescriptorProto_ClientStreaming bool = false
-const Default_MethodDescriptorProto_ServerStreaming bool = false
-
-func (m *MethodDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetInputType() string {
-	if m != nil && m.InputType != nil {
-		return *m.InputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOutputType() string {
-	if m != nil && m.OutputType != nil {
-		return *m.OutputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *MethodDescriptorProto) GetClientStreaming() bool {
-	if m != nil && m.ClientStreaming != nil {
-		return *m.ClientStreaming
-	}
-	return Default_MethodDescriptorProto_ClientStreaming
-}
-
-func (m *MethodDescriptorProto) GetServerStreaming() bool {
-	if m != nil && m.ServerStreaming != nil {
-		return *m.ServerStreaming
-	}
-	return Default_MethodDescriptorProto_ServerStreaming
-}
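
The two streaming flags above are enough to distinguish the four RPC call shapes. A small illustrative sketch (not generated code):

package main

import "fmt"

// rpcKind names the call shape implied by the client/server streaming flags.
func rpcKind(clientStreaming, serverStreaming bool) string {
	switch {
	case clientStreaming && serverStreaming:
		return "bidirectional streaming"
	case clientStreaming:
		return "client streaming"
	case serverStreaming:
		return "server streaming"
	default:
		return "unary"
	}
}

func main() {
	fmt.Println(rpcKind(false, true)) // server streaming
}
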
-
-type FileOptions struct {
-	// Sets the Java package where classes generated from this .proto will be
-	// placed.  By default, the proto package is used, but this is often
-	// inappropriate because proto packages do not normally start with backwards
-	// domain names.
-	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
-	// If set, all the classes from the .proto file are wrapped in a single
-	// outer class with the given name.  This applies to both Proto1
-	// (equivalent to the old "--one_java_file" option) and Proto2 (where
-	// a .proto always translates to a single class, but you may want to
-	// explicitly choose the class name).
-	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
-	// If set true, then the Java code generator will generate a separate .java
-	// file for each top-level message, enum, and service defined in the .proto
-	// file.  Thus, these types will *not* be nested inside the outer class
-	// named by java_outer_classname.  However, the outer class will still be
-	// generated to contain the file's getDescriptor() method as well as any
-	// top-level extensions defined in the file.
-	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
-	// This option does nothing.
-	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
-	// If set true, then the Java2 code generator will generate code that
-	// throws an exception whenever an attempt is made to assign a non-UTF-8
-	// byte sequence to a string field.
-	// Message reflection will do the same.
-	// However, an extension field still accepts non-UTF-8 byte sequences.
-	// This option has no effect when used with the lite runtime.
-	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
-	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
-	// Sets the Go package where structs generated from this .proto will be
-	// placed. If omitted, the Go package will be derived from the following:
-	//   - The basename of the package import path, if provided.
-	//   - Otherwise, the package statement in the .proto file, if present.
-	//   - Otherwise, the basename of the .proto file, without extension.
-	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
-	// Should generic services be generated in each language?  "Generic" services
-	// are not specific to any particular RPC system.  They are generated by the
-	// main code generators in each language (without additional plugins).
-	// Generic services were the only kind of service generation supported by
-	// early versions of google.protobuf.
-	//
-	// Generic services are now considered deprecated in favor of using plugins
-	// that generate code specific to your particular RPC system.  Therefore,
-	// these default to false.  Old code which depends on generic services should
-	// explicitly set them to true.
-	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
-	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
-	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
-	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
-	// Is this file deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for everything in the file, or it will be completely ignored; in the very
-	// least, this is a formalization for deprecating files.
-	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Enables the use of arenas for the proto messages in this file. This applies
-	// only to generated classes for C++.
-	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
-	// Sets the objective c class prefix which is prepended to all objective c
-	// generated classes from this .proto. There is no default.
-	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
-	// Namespace for generated classes; defaults to the package.
-	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
-	// By default Swift generators will take the proto package and CamelCase it
-	// replacing '.' with underscore and use that to prefix the types/symbols
-	// defined. When this option is provided, they will use this value instead
-	// to prefix the types/symbols defined.
-	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
-	// Sets the php class prefix which is prepended to all php generated classes
-	// from this .proto. Default is empty.
-	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
-	// Use this option to change the namespace of php generated classes. Default
-	// is empty. When this option is empty, the package name will be used for
-	// determining the namespace.
-	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
-	// Use this option to change the namespace of php generated metadata classes.
-	// Default is empty. When this option is empty, the proto file name will be
-	// used for determining the namespace.
-	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
-	// Use this option to change the package of ruby generated classes. Default
-	// is empty. When this option is not set, the package name will be used for
-	// determining the ruby package.
-	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
-	// The parser stores options it doesn't recognize here.
-	// See the documentation for the "Options" section above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *FileOptions) Reset()         { *m = FileOptions{} }
-func (m *FileOptions) String() string { return proto.CompactTextString(m) }
-func (*FileOptions) ProtoMessage()    {}
-func (*FileOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{10}
-}
-
-var extRange_FileOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FileOptions
-}
-
-func (m *FileOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
-}
-func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
-}
-func (m *FileOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileOptions.Merge(m, src)
-}
-func (m *FileOptions) XXX_Size() int {
-	return xxx_messageInfo_FileOptions.Size(m)
-}
-func (m *FileOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileOptions proto.InternalMessageInfo
-
-const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaStringCheckUtf8 bool = false
-const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
-const Default_FileOptions_CcGenericServices bool = false
-const Default_FileOptions_JavaGenericServices bool = false
-const Default_FileOptions_PyGenericServices bool = false
-const Default_FileOptions_PhpGenericServices bool = false
-const Default_FileOptions_Deprecated bool = false
-const Default_FileOptions_CcEnableArenas bool = false
-
-func (m *FileOptions) GetJavaPackage() string {
-	if m != nil && m.JavaPackage != nil {
-		return *m.JavaPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaOuterClassname() string {
-	if m != nil && m.JavaOuterClassname != nil {
-		return *m.JavaOuterClassname
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaMultipleFiles() bool {
-	if m != nil && m.JavaMultipleFiles != nil {
-		return *m.JavaMultipleFiles
-	}
-	return Default_FileOptions_JavaMultipleFiles
-}
-
-// Deprecated: Do not use.
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
-	if m != nil && m.JavaGenerateEqualsAndHash != nil {
-		return *m.JavaGenerateEqualsAndHash
-	}
-	return false
-}
-
-func (m *FileOptions) GetJavaStringCheckUtf8() bool {
-	if m != nil && m.JavaStringCheckUtf8 != nil {
-		return *m.JavaStringCheckUtf8
-	}
-	return Default_FileOptions_JavaStringCheckUtf8
-}
-
-func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
-	if m != nil && m.OptimizeFor != nil {
-		return *m.OptimizeFor
-	}
-	return Default_FileOptions_OptimizeFor
-}
-
-func (m *FileOptions) GetGoPackage() string {
-	if m != nil && m.GoPackage != nil {
-		return *m.GoPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCcGenericServices() bool {
-	if m != nil && m.CcGenericServices != nil {
-		return *m.CcGenericServices
-	}
-	return Default_FileOptions_CcGenericServices
-}
-
-func (m *FileOptions) GetJavaGenericServices() bool {
-	if m != nil && m.JavaGenericServices != nil {
-		return *m.JavaGenericServices
-	}
-	return Default_FileOptions_JavaGenericServices
-}
-
-func (m *FileOptions) GetPyGenericServices() bool {
-	if m != nil && m.PyGenericServices != nil {
-		return *m.PyGenericServices
-	}
-	return Default_FileOptions_PyGenericServices
-}
-
-func (m *FileOptions) GetPhpGenericServices() bool {
-	if m != nil && m.PhpGenericServices != nil {
-		return *m.PhpGenericServices
-	}
-	return Default_FileOptions_PhpGenericServices
-}
-
-func (m *FileOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FileOptions_Deprecated
-}
-
-func (m *FileOptions) GetCcEnableArenas() bool {
-	if m != nil && m.CcEnableArenas != nil {
-		return *m.CcEnableArenas
-	}
-	return Default_FileOptions_CcEnableArenas
-}
-
-func (m *FileOptions) GetObjcClassPrefix() string {
-	if m != nil && m.ObjcClassPrefix != nil {
-		return *m.ObjcClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCsharpNamespace() string {
-	if m != nil && m.CsharpNamespace != nil {
-		return *m.CsharpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetSwiftPrefix() string {
-	if m != nil && m.SwiftPrefix != nil {
-		return *m.SwiftPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpClassPrefix() string {
-	if m != nil && m.PhpClassPrefix != nil {
-		return *m.PhpClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpNamespace() string {
-	if m != nil && m.PhpNamespace != nil {
-		return *m.PhpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpMetadataNamespace() string {
-	if m != nil && m.PhpMetadataNamespace != nil {
-		return *m.PhpMetadataNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetRubyPackage() string {
-	if m != nil && m.RubyPackage != nil {
-		return *m.RubyPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
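
As a rough illustration of the go_package fallback order described in the comment above (an illustrative sketch with hypothetical helper names, not the code generator's actual implementation):

package main

import (
	"fmt"
	"path"
	"strings"
)

// goPackageName sketches the documented fallback: use go_package when set,
// otherwise the basename of the import path, otherwise the proto package
// statement, otherwise the .proto file name without its extension.
func goPackageName(goPackageOpt, importPath, protoPackage, protoFile string) string {
	switch {
	case goPackageOpt != "":
		return goPackageOpt
	case importPath != "":
		return path.Base(importPath)
	case protoPackage != "":
		return protoPackage
	default:
		return strings.TrimSuffix(path.Base(protoFile), ".proto")
	}
}

func main() {
	fmt.Println(goPackageName("", "github.com/example/project/helloworld", "", "hello.proto")) // helloworld
}
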
-
-type MessageOptions struct {
-	// Set true to use the old proto1 MessageSet wire format for extensions.
-	// This is provided for backwards-compatibility with the MessageSet wire
-	// format.  You should not use this for any other reason:  It's less
-	// efficient, has fewer features, and is more complicated.
-	//
-	// The message must be defined exactly as follows:
-	//   message Foo {
-	//     option message_set_wire_format = true;
-	//     extensions 4 to max;
-	//   }
-	// Note that the message cannot have any defined fields; MessageSets only
-	// have extensions.
-	//
-	// All extensions of your type must be singular messages; e.g. they cannot
-	// be int32s, enums, or repeated messages.
-	//
-	// Because this is an option, the above two restrictions are not enforced by
-	// the protocol compiler.
-	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
-	// Disables the generation of the standard "descriptor()" accessor, which can
-	// conflict with a field of the same name.  This is meant to make migration
-	// from proto1 easier; new code should avoid fields named "descriptor".
-	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
-	// Is this message deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the message, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating messages.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Whether the message is an automatically generated map entry type for the
-	// maps field.
-	//
-	// For maps fields:
-	//     map<KeyType, ValueType> map_field = 1;
-	// The parsed descriptor looks like:
-	//     message MapFieldEntry {
-	//         option map_entry = true;
-	//         optional KeyType key = 1;
-	//         optional ValueType value = 2;
-	//     }
-	//     repeated MapFieldEntry map_field = 1;
-	//
-	// Implementations may choose not to generate the map_entry=true message, but
-	// use a native map in the target language to hold the keys and values.
-	// The reflection APIs in such implementations still need to work as
-	// if the field is a repeated message field.
-	//
-	// NOTE: Do not set the option in .proto files. Always use the maps syntax
-	// instead. The option should only be implicitly set by the proto compiler
-	// parser.
-	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
-func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
-func (*MessageOptions) ProtoMessage()    {}
-func (*MessageOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{11}
-}
-
-var extRange_MessageOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MessageOptions
-}
-
-func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
-}
-func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
-}
-func (m *MessageOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MessageOptions.Merge(m, src)
-}
-func (m *MessageOptions) XXX_Size() int {
-	return xxx_messageInfo_MessageOptions.Size(m)
-}
-func (m *MessageOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
-
-const Default_MessageOptions_MessageSetWireFormat bool = false
-const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
-const Default_MessageOptions_Deprecated bool = false
-
-func (m *MessageOptions) GetMessageSetWireFormat() bool {
-	if m != nil && m.MessageSetWireFormat != nil {
-		return *m.MessageSetWireFormat
-	}
-	return Default_MessageOptions_MessageSetWireFormat
-}
-
-func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
-	if m != nil && m.NoStandardDescriptorAccessor != nil {
-		return *m.NoStandardDescriptorAccessor
-	}
-	return Default_MessageOptions_NoStandardDescriptorAccessor
-}
-
-func (m *MessageOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MessageOptions_Deprecated
-}
-
-func (m *MessageOptions) GetMapEntry() bool {
-	if m != nil && m.MapEntry != nil {
-		return *m.MapEntry
-	}
-	return false
-}
-
-func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type FieldOptions struct {
-	// The ctype option instructs the C++ code generator to use a different
-	// representation of the field than it normally would.  See the specific
-	// options below.  This option is not yet implemented in the open source
-	// release -- sorry, we'll try to include it in a future version!
-	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
-	// The packed option can be enabled for repeated primitive fields to enable
-	// a more efficient representation on the wire. Rather than repeatedly
-	// writing the tag and type for each element, the entire array is encoded as
-	// a single length-delimited blob. In proto3, only explicitly setting it to
-	// false will avoid using packed encoding.
-	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
-	// The jstype option determines the JavaScript type used for values of the
-	// field.  The option is permitted only for 64 bit integral and fixed types
-	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
-	// is represented as a JavaScript string, which avoids loss of precision that
-	// can happen when a large value is converted to a floating point JavaScript number.
-	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
-	// use the JavaScript "number" type.  The behavior of the default option
-	// JS_NORMAL is implementation dependent.
-	//
-	// This option is an enum to permit additional types to be added, e.g.
-	// goog.math.Integer.
-	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
-	// Should this field be parsed lazily?  Lazy applies only to message-type
-	// fields.  It means that when the outer message is initially parsed, the
-	// inner message's contents will not be parsed but instead stored in encoded
-	// form.  The inner message will actually be parsed when it is first accessed.
-	//
-	// This is only a hint.  Implementations are free to choose whether to use
-	// eager or lazy parsing regardless of the value of this option.  However,
-	// setting this option true suggests that the protocol author believes that
-	// using lazy parsing on this field is worth the additional bookkeeping
-	// overhead typically needed to implement it.
-	//
-	// This option does not affect the public interface of any generated code;
-	// all method signatures remain the same.  Furthermore, thread-safety of the
-	// interface is not affected by this option; const methods remain safe to
-	// call from multiple threads concurrently, while non-const methods continue
-	// to require exclusive access.
-	//
-	//
-	// Note that implementations may choose not to check required fields within
-	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
-	// may return true even if the inner message has missing required fields.
-	// This is necessary because otherwise the inner message would have to be
-	// parsed in order to perform the check, defeating the purpose of lazy
-	// parsing.  An implementation which chooses not to check required fields
-	// must be consistent about it.  That is, for any particular sub-message, the
-	// implementation must either *always* check its required fields, or *never*
-	// check its required fields, regardless of whether or not the message has
-	// been parsed.
-	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
-	// Is this field deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for accessors, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating fields.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// For Google-internal migration only. Do not use.
-	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
-func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
-func (*FieldOptions) ProtoMessage()    {}
-func (*FieldOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{12}
-}
-
-var extRange_FieldOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FieldOptions
-}
-
-func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
-}
-func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
-}
-func (m *FieldOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FieldOptions.Merge(m, src)
-}
-func (m *FieldOptions) XXX_Size() int {
-	return xxx_messageInfo_FieldOptions.Size(m)
-}
-func (m *FieldOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
-
-const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
-const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
-const Default_FieldOptions_Lazy bool = false
-const Default_FieldOptions_Deprecated bool = false
-const Default_FieldOptions_Weak bool = false
-
-func (m *FieldOptions) GetCtype() FieldOptions_CType {
-	if m != nil && m.Ctype != nil {
-		return *m.Ctype
-	}
-	return Default_FieldOptions_Ctype
-}
-
-func (m *FieldOptions) GetPacked() bool {
-	if m != nil && m.Packed != nil {
-		return *m.Packed
-	}
-	return false
-}
-
-func (m *FieldOptions) GetJstype() FieldOptions_JSType {
-	if m != nil && m.Jstype != nil {
-		return *m.Jstype
-	}
-	return Default_FieldOptions_Jstype
-}
-
-func (m *FieldOptions) GetLazy() bool {
-	if m != nil && m.Lazy != nil {
-		return *m.Lazy
-	}
-	return Default_FieldOptions_Lazy
-}
-
-func (m *FieldOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FieldOptions_Deprecated
-}
-
-func (m *FieldOptions) GetWeak() bool {
-	if m != nil && m.Weak != nil {
-		return *m.Weak
-	}
-	return Default_FieldOptions_Weak
-}
-
-func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
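
For intuition on the packed option documented above, a back-of-the-envelope size comparison for a repeated varint field, assuming a field number below 16 and fewer than 128 elements, each with a value below 128, so every tag, length, and value fits in one byte (illustration only, not generated code):

package main

import "fmt"

// unpackedBytes: each element is encoded as its own 1-byte tag plus a 1-byte varint.
func unpackedBytes(n int) int { return 2 * n }

// packedBytes: one 1-byte tag, one 1-byte length, then n 1-byte varints.
func packedBytes(n int) int { return 2 + n }

func main() {
	for _, n := range []int{1, 10, 100} {
		fmt.Printf("%3d elements: unpacked=%3d bytes, packed=%3d bytes\n", n, unpackedBytes(n), packedBytes(n))
	}
}
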
-
-type OneofOptions struct {
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
-func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
-func (*OneofOptions) ProtoMessage()    {}
-func (*OneofOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{13}
-}
-
-var extRange_OneofOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_OneofOptions
-}
-
-func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
-}
-func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
-}
-func (m *OneofOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_OneofOptions.Merge(m, src)
-}
-func (m *OneofOptions) XXX_Size() int {
-	return xxx_messageInfo_OneofOptions.Size(m)
-}
-func (m *OneofOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
-
-func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type EnumOptions struct {
-	// Set this option to true to allow mapping different tag names to the same
-	// value.
-	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
-	// Is this enum deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating enums.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
-func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumOptions) ProtoMessage()    {}
-func (*EnumOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{14}
-}
-
-var extRange_EnumOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumOptions
-}
-
-func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
-}
-func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumOptions.Merge(m, src)
-}
-func (m *EnumOptions) XXX_Size() int {
-	return xxx_messageInfo_EnumOptions.Size(m)
-}
-func (m *EnumOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
-
-const Default_EnumOptions_Deprecated bool = false
-
-func (m *EnumOptions) GetAllowAlias() bool {
-	if m != nil && m.AllowAlias != nil {
-		return *m.AllowAlias
-	}
-	return false
-}
-
-func (m *EnumOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumOptions_Deprecated
-}
-
-func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type EnumValueOptions struct {
-	// Is this enum value deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum value, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating enum values.
-	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
-func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumValueOptions) ProtoMessage()    {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{15}
-}
-
-var extRange_EnumValueOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumValueOptions
-}
-
-func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
-}
-func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
-}
-func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EnumValueOptions.Merge(m, src)
-}
-func (m *EnumValueOptions) XXX_Size() int {
-	return xxx_messageInfo_EnumValueOptions.Size(m)
-}
-func (m *EnumValueOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
-
-const Default_EnumValueOptions_Deprecated bool = false
-
-func (m *EnumValueOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumValueOptions_Deprecated
-}
-
-func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type ServiceOptions struct {
-	// Is this service deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the service, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating services.
-	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
-func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
-func (*ServiceOptions) ProtoMessage()    {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{16}
-}
-
-var extRange_ServiceOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_ServiceOptions
-}
-
-func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
-}
-func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
-}
-func (m *ServiceOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceOptions.Merge(m, src)
-}
-func (m *ServiceOptions) XXX_Size() int {
-	return xxx_messageInfo_ServiceOptions.Size(m)
-}
-func (m *ServiceOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
-
-const Default_ServiceOptions_Deprecated bool = false
-
-func (m *ServiceOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_ServiceOptions_Deprecated
-}
-
-func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type MethodOptions struct {
-	// Is this method deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the method, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating methods.
-	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
-	XXX_sizecache                int32  `json:"-"`
-}
-
-func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
-func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
-func (*MethodOptions) ProtoMessage()    {}
-func (*MethodOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{17}
-}
-
-var extRange_MethodOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MethodOptions
-}
-
-func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
-}
-func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
-}
-func (m *MethodOptions) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MethodOptions.Merge(m, src)
-}
-func (m *MethodOptions) XXX_Size() int {
-	return xxx_messageInfo_MethodOptions.Size(m)
-}
-func (m *MethodOptions) XXX_DiscardUnknown() {
-	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
-
-const Default_MethodOptions_Deprecated bool = false
-const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
-
-func (m *MethodOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MethodOptions_Deprecated
-}
-
-func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
-	if m != nil && m.IdempotencyLevel != nil {
-		return *m.IdempotencyLevel
-	}
-	return Default_MethodOptions_IdempotencyLevel
-}
-
-func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-// A message representing an option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-type UninterpretedOption struct {
-	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
-	// The value of the uninterpreted option, in whatever type the tokenizer
-	// identified it as during parsing. Exactly one of these should be set.
-	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
-	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
-	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
-	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
-	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
-	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
-func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption) ProtoMessage()    {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{18}
-}
-func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
-}
-func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption.Merge(m, src)
-}
-func (m *UninterpretedOption) XXX_Size() int {
-	return xxx_messageInfo_UninterpretedOption.Size(m)
-}
-func (m *UninterpretedOption) XXX_DiscardUnknown() {
-	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
-
-func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
-	if m != nil {
-		return m.Name
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetIdentifierValue() string {
-	if m != nil && m.IdentifierValue != nil {
-		return *m.IdentifierValue
-	}
-	return ""
-}
-
-func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
-	if m != nil && m.PositiveIntValue != nil {
-		return *m.PositiveIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetNegativeIntValue() int64 {
-	if m != nil && m.NegativeIntValue != nil {
-		return *m.NegativeIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetDoubleValue() float64 {
-	if m != nil && m.DoubleValue != nil {
-		return *m.DoubleValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetStringValue() []byte {
-	if m != nil {
-		return m.StringValue
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetAggregateValue() string {
-	if m != nil && m.AggregateValue != nil {
-		return *m.AggregateValue
-	}
-	return ""
-}
-
-// The name of the uninterpreted option.  Each string represents a segment in
-// a dot-separated name.  is_extension is true iff a segment represents an
-// extension (denoted with parentheses in options specs in .proto files).
-// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
-type UninterpretedOption_NamePart struct {
-	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
-	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
-func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption_NamePart) ProtoMessage()    {}
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{18, 0}
-}
-func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
-}
-func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
-}
-func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
-}
-func (m *UninterpretedOption_NamePart) XXX_Size() int {
-	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
-}
-func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
-	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
-
-func (m *UninterpretedOption_NamePart) GetNamePart() string {
-	if m != nil && m.NamePart != nil {
-		return *m.NamePart
-	}
-	return ""
-}
-
-func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
-	if m != nil && m.IsExtension != nil {
-		return *m.IsExtension
-	}
-	return false
-}
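
The NamePart comment above describes how a dotted option name is split into segments; the reverse direction is straightforward. An illustrative sketch (names here are not part of the generated file):

package main

import (
	"fmt"
	"strings"
)

// namePart mirrors the two NamePart fields that matter for formatting:
// the segment text and whether the segment names an extension.
type namePart struct {
	text        string
	isExtension bool
}

// formatOptionName joins segments with dots, wrapping extension segments in
// parentheses, so {"foo",false} {"bar.baz",true} {"qux",false} becomes
// "foo.(bar.baz).qux".
func formatOptionName(parts []namePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.isExtension {
			segs = append(segs, "("+p.text+")")
		} else {
			segs = append(segs, p.text)
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	parts := []namePart{{"foo", false}, {"bar.baz", true}, {"qux", false}}
	fmt.Println(formatOptionName(parts)) // foo.(bar.baz).qux
}
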
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-type SourceCodeInfo struct {
-	// A Location identifies a piece of source code in a .proto file which
-	// corresponds to a particular definition.  This information is intended
-	// to be useful to IDEs, code indexers, documentation generators, and similar
-	// tools.
-	//
-	// For example, say we have a file like:
-	//   message Foo {
-	//     optional string foo = 1;
-	//   }
-	// Let's look at just the field definition:
-	//   optional string foo = 1;
-	//   ^       ^^     ^^  ^  ^^^
-	//   a       bc     de  f  ghi
-	// We have the following locations:
-	//   span   path               represents
-	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
-	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
-	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
-	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
-	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
-	//
-	// Notes:
-	// - A location may refer to a repeated field itself (i.e. not to any
-	//   particular index within it).  This is used whenever a set of elements is
-	//   logically enclosed in a single code segment.  For example, an entire
-	//   extend block (possibly containing multiple extension definitions) will
-	//   have an outer location whose path refers to the "extensions" repeated
-	//   field without an index.
-	// - Multiple locations may have the same path.  This happens when a single
-	//   logical declaration is spread out across multiple places.  The most
-	//   obvious example is the "extend" block again -- there may be multiple
-	//   extend blocks in the same scope, each of which will have the same path.
-	// - A location's span is not always a subset of its parent's span.  For
-	//   example, the "extendee" of an extension declaration appears at the
-	//   beginning of the "extend" block and is shared by all extensions within
-	//   the block.
-	// - Just because a location's span is a subset of some other location's span
-	//   does not mean that it is a descendant.  For example, a "group" defines
-	//   both a type and a field in a single declaration.  Thus, the locations
-	//   corresponding to the type and field and their components will overlap.
-	// - Code which tries to interpret locations should probably be designed to
-	//   ignore those that it doesn't understand, as more types of locations could
-	//   be recorded in the future.
-	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
-	XXX_unrecognized     []byte                     `json:"-"`
-	XXX_sizecache        int32                      `json:"-"`
-}
-
-func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
-func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo) ProtoMessage()    {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{19}
-}
-func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
-}
-func (m *SourceCodeInfo) XXX_Size() int {
-	return xxx_messageInfo_SourceCodeInfo.Size(m)
-}
-func (m *SourceCodeInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
-
-func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
-	if m != nil {
-		return m.Location
-	}
-	return nil
-}
-
-type SourceCodeInfo_Location struct {
-	// Identifies which part of the FileDescriptorProto was defined at this
-	// location.
-	//
-	// Each element is a field number or an index.  They form a path from
-	// the root FileDescriptorProto to the place where the definition occurs.  For
-	// example, this path:
-	//   [ 4, 3, 2, 7, 1 ]
-	// refers to:
-	//   file.message_type(3)  // 4, 3
-	//       .field(7)         // 2, 7
-	//       .name()           // 1
-	// This is because FileDescriptorProto.message_type has field number 4:
-	//   repeated DescriptorProto message_type = 4;
-	// and DescriptorProto.field has field number 2:
-	//   repeated FieldDescriptorProto field = 2;
-	// and FieldDescriptorProto.name has field number 1:
-	//   optional string name = 1;
-	//
-	// Thus, the above path gives the location of a field name.  If we removed
-	// the last element:
-	//   [ 4, 3, 2, 7 ]
-	// this path refers to the whole field declaration (from the beginning
-	// of the label to the terminating semicolon).
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Always has exactly three or four elements: start line, start column,
-	// end line (optional, otherwise assumed same as start line), end column.
-	// These are packed into a single field for efficiency.  Note that line
-	// and column numbers are zero-based -- typically you will want to add
-	// 1 to each before displaying to a user.
-	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
-	// If this SourceCodeInfo represents a complete declaration, these are any
-	// comments appearing before and after the declaration which appear to be
-	// attached to the declaration.
-	//
-	// A series of line comments appearing on consecutive lines, with no other
-	// tokens appearing on those lines, will be treated as a single comment.
-	//
-	// leading_detached_comments will keep paragraphs of comments that appear
-	// before (but not connected to) the current element. Each paragraph,
-	// separated by empty lines, will be one comment element in the repeated
-	// field.
-	//
-	// Only the comment content is provided; comment markers (e.g. //) are
-	// stripped out.  For block comments, leading whitespace and an asterisk
-	// will be stripped from the beginning of each line other than the first.
-	// Newlines are included in the output.
-	//
-	// Examples:
-	//
-	//   optional int32 foo = 1;  // Comment attached to foo.
-	//   // Comment attached to bar.
-	//   optional int32 bar = 2;
-	//
-	//   optional string baz = 3;
-	//   // Comment attached to baz.
-	//   // Another line attached to baz.
-	//
-	//   // Comment attached to qux.
-	//   //
-	//   // Another line attached to qux.
-	//   optional double qux = 4;
-	//
-	//   // Detached comment for corge. This is not leading or trailing comments
-	//   // to qux or corge because there are blank lines separating it from
-	//   // both.
-	//
-	//   // Detached comment for corge paragraph 2.
-	//
-	//   optional string corge = 5;
-	//   /* Block comment attached
-	//    * to corge.  Leading asterisks
-	//    * will be removed. */
-	//   /* Block comment attached to
-	//    * grault. */
-	//   optional int32 grault = 6;
-	//
-	//   // ignored detached comments.
-	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
-	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
-	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
-	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
-	XXX_unrecognized        []byte   `json:"-"`
-	XXX_sizecache           int32    `json:"-"`
-}
-
-func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
-func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo_Location) ProtoMessage()    {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{19, 0}
-}
-func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
-}
-func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
-}
-func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
-}
-func (m *SourceCodeInfo_Location) XXX_Size() int {
-	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
-}
-func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
-	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
-
-func (m *SourceCodeInfo_Location) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetSpan() []int32 {
-	if m != nil {
-		return m.Span
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingComments() string {
-	if m != nil && m.LeadingComments != nil {
-		return *m.LeadingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetTrailingComments() string {
-	if m != nil && m.TrailingComments != nil {
-		return *m.TrailingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
-	if m != nil {
-		return m.LeadingDetachedComments
-	}
-	return nil
-}
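
The span layout documented above (three or four zero-based values) maps directly to the 1-based line:column ranges users expect. An illustrative sketch, not generated code:

package main

import "fmt"

// formatSpan renders [startLine, startCol, endCol] or
// [startLine, startCol, endLine, endCol] as "line:col-line:col",
// adding 1 to each zero-based value before display.
func formatSpan(span []int32) (string, error) {
	switch len(span) {
	case 3:
		return fmt.Sprintf("%d:%d-%d:%d", span[0]+1, span[1]+1, span[0]+1, span[2]+1), nil
	case 4:
		return fmt.Sprintf("%d:%d-%d:%d", span[0]+1, span[1]+1, span[2]+1, span[3]+1), nil
	default:
		return "", fmt.Errorf("span must have 3 or 4 elements, got %d", len(span))
	}
}

func main() {
	s, _ := formatSpan([]int32{4, 2, 4, 25})
	fmt.Println(s) // 5:3-5:26
}
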
-
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-type GeneratedCodeInfo struct {
-	// An Annotation connects some span of text in generated code to an element
-	// of its generating .proto file.
-	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
-	XXX_unrecognized     []byte                          `json:"-"`
-	XXX_sizecache        int32                           `json:"-"`
-}
-
-func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
-func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo) ProtoMessage()    {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{20}
-}
-func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
-}
-func (m *GeneratedCodeInfo) XXX_Size() int {
-	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
-}
-func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
-	if m != nil {
-		return m.Annotation
-	}
-	return nil
-}
-
-type GeneratedCodeInfo_Annotation struct {
-	// Identifies the element in the original source .proto file. This field
-	// is formatted the same as SourceCodeInfo.Location.path.
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Identifies the filesystem path to the original source .proto.
-	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
-	// Identifies the starting offset in bytes in the generated code
-	// that relates to the identified object.
-	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
-	// Identifies the ending offset in bytes in the generated code that
-	// relates to the identified offset. The end offset should be one past
-	// the last relevant byte (so the length of the text = end - begin).
-	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
-func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_308767df5ffe18af, []int{20, 0}
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
-	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
-}
-func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
-	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
-
-func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
-	if m != nil && m.SourceFile != nil {
-		return *m.SourceFile
-	}
-	return ""
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
-	if m != nil && m.Begin != nil {
-		return *m.Begin
-	}
-	return 0
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
-	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
-	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
-	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
-	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
-	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
-	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
-	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
-	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
-	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
-	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
-	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
-	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
-	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
-	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
-	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
-	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
-	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
-	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
-	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
-	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
-	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
-	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
-	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
-	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
-	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
-	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
-	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
-}
-
-func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) }
-
-var fileDescriptor_308767df5ffe18af = []byte{
-	// 2522 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
-	0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66,
-	0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe,
-	0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89,
-	0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80,
-	0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66,
-	0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f,
-	0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63,
-	0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e,
-	0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec,
-	0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2,
-	0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e,
-	0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2,
-	0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39,
-	0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd,
-	0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41,
-	0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22,
-	0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa,
-	0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4,
-	0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7,
-	0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d,
-	0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e,
-	0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12,
-	0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d,
-	0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2,
-	0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1,
-	0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba,
-	0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60,
-	0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77,
-	0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24,
-	0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06,
-	0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a,
-	0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92,
-	0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6,
-	0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c,
-	0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7,
-	0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f,
-	0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd,
-	0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07,
-	0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95,
-	0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77,
-	0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e,
-	0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8,
-	0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69,
-	0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0,
-	0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05,
-	0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46,
-	0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f,
-	0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c,
-	0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3,
-	0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5,
-	0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95,
-	0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a,
-	0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07,
-	0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2,
-	0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f,
-	0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42,
-	0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e,
-	0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4,
-	0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90,
-	0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae,
-	0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d,
-	0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e,
-	0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58,
-	0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9,
-	0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f,
-	0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4,
-	0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15,
-	0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf,
-	0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba,
-	0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6,
-	0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01,
-	0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73,
-	0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb,
-	0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1,
-	0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7,
-	0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f,
-	0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78,
-	0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a,
-	0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba,
-	0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49,
-	0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48,
-	0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee,
-	0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0,
-	0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a,
-	0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63,
-	0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2,
-	0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59,
-	0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35,
-	0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd,
-	0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee,
-	0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b,
-	0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf,
-	0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8,
-	0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31,
-	0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53,
-	0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8,
-	0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8,
-	0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d,
-	0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81,
-	0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8,
-	0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f,
-	0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9,
-	0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03,
-	0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff,
-	0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d,
-	0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0,
-	0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8,
-	0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4,
-	0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a,
-	0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86,
-	0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71,
-	0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76,
-	0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35,
-	0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b,
-	0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7,
-	0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e,
-	0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd,
-	0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01,
-	0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55,
-	0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41,
-	0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79,
-	0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7,
-	0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c,
-	0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd,
-	0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99,
-	0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88,
-	0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95,
-	0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed,
-	0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea,
-	0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d,
-	0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee,
-	0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4,
-	0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25,
-	0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0,
-	0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97,
-	0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94,
-	0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22,
-	0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43,
-	0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80,
-	0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd,
-	0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77,
-	0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75,
-	0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4,
-	0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11,
-	0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb,
-	0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c,
-	0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0,
-	0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d,
-	0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07,
-	0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39,
-	0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80,
-	0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42,
-	0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c,
-	0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8,
-	0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7,
-	0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00,
-	0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00,
-}

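A side note on the generated accessors removed above: they are deliberately nil-safe, because a Go method with a pointer receiver may be called on a nil pointer as long as it never dereferences the receiver. The sketch below illustrates that pattern with a made-up `annotation` type; it is not part of the vendored package and the field names are illustrative only.

```go
// Standalone sketch (not part of the diff): the nil-safe getter pattern used
// throughout the removed descriptor.pb.go. Illustrative type and fields only.
package main

import "fmt"

type annotation struct {
	sourceFile *string
	begin      *int32
	end        *int32
}

// GetSourceFile mirrors the generated style: return the zero value when the
// receiver or the optional field is nil.
func (a *annotation) GetSourceFile() string {
	if a != nil && a.sourceFile != nil {
		return *a.sourceFile
	}
	return ""
}

func (a *annotation) GetBegin() int32 {
	if a != nil && a.begin != nil {
		return *a.begin
	}
	return 0
}

func (a *annotation) GetEnd() int32 {
	if a != nil && a.end != nil {
		return *a.end
	}
	return 0
}

func main() {
	var missing *annotation // nil pointer: the getters still work
	fmt.Println(missing.GetSourceFile(), missing.GetBegin(), missing.GetEnd())

	file, begin, end := "api.proto", int32(10), int32(24)
	a := &annotation{sourceFile: &file, begin: &begin, end: &end}
	// end is one past the last byte, so the annotated span length is end-begin.
	fmt.Println(a.GetSourceFile(), a.GetEnd()-a.GetBegin()) // api.proto 14
}
```

This is why callers of generated protobuf code can chain getters such as `GetOptions().GetDeprecated()` without guarding each step.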
+ 0 - 752
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go

@@ -1,752 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: descriptor.proto
-
-package descriptor
-
-import (
-	fmt "fmt"
-	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-	proto "github.com/gogo/protobuf/proto"
-	math "math"
-	reflect "reflect"
-	sort "sort"
-	strconv "strconv"
-	strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-func (this *FileDescriptorSet) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&descriptor.FileDescriptorSet{")
-	if this.File != nil {
-		s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *FileDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 16)
-	s = append(s, "&descriptor.FileDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Package != nil {
-		s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
-	}
-	if this.Dependency != nil {
-		s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
-	}
-	if this.PublicDependency != nil {
-		s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
-	}
-	if this.WeakDependency != nil {
-		s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
-	}
-	if this.MessageType != nil {
-		s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
-	}
-	if this.EnumType != nil {
-		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
-	}
-	if this.Service != nil {
-		s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
-	}
-	if this.Extension != nil {
-		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.SourceCodeInfo != nil {
-		s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
-	}
-	if this.Syntax != nil {
-		s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *DescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 14)
-	s = append(s, "&descriptor.DescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Field != nil {
-		s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
-	}
-	if this.Extension != nil {
-		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
-	}
-	if this.NestedType != nil {
-		s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
-	}
-	if this.EnumType != nil {
-		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
-	}
-	if this.ExtensionRange != nil {
-		s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
-	}
-	if this.OneofDecl != nil {
-		s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.ReservedRange != nil {
-		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
-	}
-	if this.ReservedName != nil {
-		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *DescriptorProto_ExtensionRange) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
-	if this.Start != nil {
-		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
-	}
-	if this.End != nil {
-		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *DescriptorProto_ReservedRange) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
-	if this.Start != nil {
-		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
-	}
-	if this.End != nil {
-		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *ExtensionRangeOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&descriptor.ExtensionRangeOptions{")
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *FieldDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 14)
-	s = append(s, "&descriptor.FieldDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Number != nil {
-		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
-	}
-	if this.Label != nil {
-		s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
-	}
-	if this.Type != nil {
-		s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
-	}
-	if this.TypeName != nil {
-		s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
-	}
-	if this.Extendee != nil {
-		s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
-	}
-	if this.DefaultValue != nil {
-		s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
-	}
-	if this.OneofIndex != nil {
-		s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
-	}
-	if this.JsonName != nil {
-		s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *OneofDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.OneofDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *EnumDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 9)
-	s = append(s, "&descriptor.EnumDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Value != nil {
-		s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.ReservedRange != nil {
-		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
-	}
-	if this.ReservedName != nil {
-		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
-	if this.Start != nil {
-		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
-	}
-	if this.End != nil {
-		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *EnumValueDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&descriptor.EnumValueDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Number != nil {
-		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *ServiceDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&descriptor.ServiceDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.Method != nil {
-		s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *MethodDescriptorProto) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 10)
-	s = append(s, "&descriptor.MethodDescriptorProto{")
-	if this.Name != nil {
-		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
-	}
-	if this.InputType != nil {
-		s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
-	}
-	if this.OutputType != nil {
-		s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
-	}
-	if this.Options != nil {
-		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
-	}
-	if this.ClientStreaming != nil {
-		s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
-	}
-	if this.ServerStreaming != nil {
-		s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *FileOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 25)
-	s = append(s, "&descriptor.FileOptions{")
-	if this.JavaPackage != nil {
-		s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
-	}
-	if this.JavaOuterClassname != nil {
-		s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
-	}
-	if this.JavaMultipleFiles != nil {
-		s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
-	}
-	if this.JavaGenerateEqualsAndHash != nil {
-		s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
-	}
-	if this.JavaStringCheckUtf8 != nil {
-		s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
-	}
-	if this.OptimizeFor != nil {
-		s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
-	}
-	if this.GoPackage != nil {
-		s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
-	}
-	if this.CcGenericServices != nil {
-		s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
-	}
-	if this.JavaGenericServices != nil {
-		s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
-	}
-	if this.PyGenericServices != nil {
-		s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
-	}
-	if this.PhpGenericServices != nil {
-		s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
-	}
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.CcEnableArenas != nil {
-		s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
-	}
-	if this.ObjcClassPrefix != nil {
-		s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
-	}
-	if this.CsharpNamespace != nil {
-		s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
-	}
-	if this.SwiftPrefix != nil {
-		s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
-	}
-	if this.PhpClassPrefix != nil {
-		s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
-	}
-	if this.PhpNamespace != nil {
-		s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
-	}
-	if this.PhpMetadataNamespace != nil {
-		s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
-	}
-	if this.RubyPackage != nil {
-		s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *MessageOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 9)
-	s = append(s, "&descriptor.MessageOptions{")
-	if this.MessageSetWireFormat != nil {
-		s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
-	}
-	if this.NoStandardDescriptorAccessor != nil {
-		s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
-	}
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.MapEntry != nil {
-		s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *FieldOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 11)
-	s = append(s, "&descriptor.FieldOptions{")
-	if this.Ctype != nil {
-		s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
-	}
-	if this.Packed != nil {
-		s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
-	}
-	if this.Jstype != nil {
-		s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
-	}
-	if this.Lazy != nil {
-		s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
-	}
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.Weak != nil {
-		s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *OneofOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&descriptor.OneofOptions{")
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *EnumOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&descriptor.EnumOptions{")
-	if this.AllowAlias != nil {
-		s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
-	}
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *EnumValueOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.EnumValueOptions{")
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *ServiceOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.ServiceOptions{")
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *MethodOptions) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&descriptor.MethodOptions{")
-	if this.Deprecated != nil {
-		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
-	}
-	if this.IdempotencyLevel != nil {
-		s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
-	}
-	if this.UninterpretedOption != nil {
-		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
-	}
-	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *UninterpretedOption) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 11)
-	s = append(s, "&descriptor.UninterpretedOption{")
-	if this.Name != nil {
-		s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
-	}
-	if this.IdentifierValue != nil {
-		s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
-	}
-	if this.PositiveIntValue != nil {
-		s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
-	}
-	if this.NegativeIntValue != nil {
-		s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
-	}
-	if this.DoubleValue != nil {
-		s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
-	}
-	if this.StringValue != nil {
-		s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
-	}
-	if this.AggregateValue != nil {
-		s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *UninterpretedOption_NamePart) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 6)
-	s = append(s, "&descriptor.UninterpretedOption_NamePart{")
-	if this.NamePart != nil {
-		s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
-	}
-	if this.IsExtension != nil {
-		s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *SourceCodeInfo) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&descriptor.SourceCodeInfo{")
-	if this.Location != nil {
-		s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *SourceCodeInfo_Location) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 9)
-	s = append(s, "&descriptor.SourceCodeInfo_Location{")
-	if this.Path != nil {
-		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
-	}
-	if this.Span != nil {
-		s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
-	}
-	if this.LeadingComments != nil {
-		s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
-	}
-	if this.TrailingComments != nil {
-		s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
-	}
-	if this.LeadingDetachedComments != nil {
-		s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *GeneratedCodeInfo) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 5)
-	s = append(s, "&descriptor.GeneratedCodeInfo{")
-	if this.Annotation != nil {
-		s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func (this *GeneratedCodeInfo_Annotation) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 8)
-	s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
-	if this.Path != nil {
-		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
-	}
-	if this.SourceFile != nil {
-		s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
-	}
-	if this.Begin != nil {
-		s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
-	}
-	if this.End != nil {
-		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
-	}
-	if this.XXX_unrecognized != nil {
-		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
-	}
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringDescriptor(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
-	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
-	if e == nil {
-		return "nil"
-	}
-	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "})"
-	return s
-}

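For context on the `GoString` methods removed above: they satisfy `fmt.GoStringer`, so formatting a descriptor message with `%#v` prints a compilable Go literal instead of the default struct dump. A minimal standalone sketch of the same idea, using an illustrative `example` type rather than the real descriptor messages:

```go
// Standalone sketch (not part of the vendored package): implementing
// fmt.GoStringer so %#v emits valid Go source, as the removed file does for
// every descriptor message. Illustrative type only.
package main

import (
	"fmt"
	"strings"
)

type example struct {
	Name *string
}

// GoString is what %#v calls when a value implements fmt.GoStringer.
func (e *example) GoString() string {
	if e == nil {
		return "nil"
	}
	s := []string{"&main.example{"}
	if e.Name != nil {
		// Mirror the generated style: wrap optional scalars in a literal
		// that yields a pointer, so the output compiles as Go source.
		s = append(s, fmt.Sprintf("Name: func(v string) *string { return &v }(%q),\n", *e.Name))
	}
	s = append(s, "}")
	return strings.Join(s, "")
}

func main() {
	name := "descriptor.proto"
	fmt.Printf("%#v\n", &example{Name: &name})
}
```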
+ 0 - 390
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go

@@ -1,390 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package descriptor
-
-import (
-	"strings"
-)
-
-func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
-	if !msg.GetOptions().GetMapEntry() {
-		return nil, nil
-	}
-	return msg.GetField()[0], msg.GetField()[1]
-}
-
-func dotToUnderscore(r rune) rune {
-	if r == '.' {
-		return '_'
-	}
-	return r
-}
-
-func (field *FieldDescriptorProto) WireType() (wire int) {
-	switch *field.Type {
-	case FieldDescriptorProto_TYPE_DOUBLE:
-		return 1
-	case FieldDescriptorProto_TYPE_FLOAT:
-		return 5
-	case FieldDescriptorProto_TYPE_INT64:
-		return 0
-	case FieldDescriptorProto_TYPE_UINT64:
-		return 0
-	case FieldDescriptorProto_TYPE_INT32:
-		return 0
-	case FieldDescriptorProto_TYPE_UINT32:
-		return 0
-	case FieldDescriptorProto_TYPE_FIXED64:
-		return 1
-	case FieldDescriptorProto_TYPE_FIXED32:
-		return 5
-	case FieldDescriptorProto_TYPE_BOOL:
-		return 0
-	case FieldDescriptorProto_TYPE_STRING:
-		return 2
-	case FieldDescriptorProto_TYPE_GROUP:
-		return 2
-	case FieldDescriptorProto_TYPE_MESSAGE:
-		return 2
-	case FieldDescriptorProto_TYPE_BYTES:
-		return 2
-	case FieldDescriptorProto_TYPE_ENUM:
-		return 0
-	case FieldDescriptorProto_TYPE_SFIXED32:
-		return 5
-	case FieldDescriptorProto_TYPE_SFIXED64:
-		return 1
-	case FieldDescriptorProto_TYPE_SINT32:
-		return 0
-	case FieldDescriptorProto_TYPE_SINT64:
-		return 0
-	}
-	panic("unreachable")
-}
-
-func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
-	packed := field.IsPacked()
-	wireType := field.WireType()
-	fieldNumber := field.GetNumber()
-	if packed {
-		wireType = 2
-	}
-	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
-	return x
-}
-
-func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
-	packed := field.IsPacked3()
-	wireType := field.WireType()
-	fieldNumber := field.GetNumber()
-	if packed {
-		wireType = 2
-	}
-	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
-	return x
-}
-
-func (field *FieldDescriptorProto) GetKey() []byte {
-	x := field.GetKeyUint64()
-	i := 0
-	keybuf := make([]byte, 0)
-	for i = 0; x > 127; i++ {
-		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
-		x >>= 7
-	}
-	keybuf = append(keybuf, uint8(x))
-	return keybuf
-}
-
-func (field *FieldDescriptorProto) GetKey3() []byte {
-	x := field.GetKey3Uint64()
-	i := 0
-	keybuf := make([]byte, 0)
-	for i = 0; x > 127; i++ {
-		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
-		x >>= 7
-	}
-	keybuf = append(keybuf, uint8(x))
-	return keybuf
-}
-
-func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
-	msg := desc.GetMessage(packageName, messageName)
-	if msg == nil {
-		return nil
-	}
-	for _, field := range msg.GetField() {
-		if field.GetName() == fieldName {
-			return field
-		}
-	}
-	return nil
-}
-
-func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
-	for _, msg := range file.GetMessageType() {
-		if msg.GetName() == typeName {
-			return msg
-		}
-		nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
-		if nes != nil {
-			return nes
-		}
-	}
-	return nil
-}
-
-func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
-	for _, nes := range msg.GetNestedType() {
-		if nes.GetName() == typeName {
-			return nes
-		}
-		res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
-		if res != nil {
-			return res
-		}
-	}
-	return nil
-}
-
-func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
-	for _, file := range desc.GetFile() {
-		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
-			continue
-		}
-		for _, msg := range file.GetMessageType() {
-			if msg.GetName() == typeName {
-				return msg
-			}
-		}
-		for _, msg := range file.GetMessageType() {
-			for _, nes := range msg.GetNestedType() {
-				if nes.GetName() == typeName {
-					return nes
-				}
-				if msg.GetName()+"."+nes.GetName() == typeName {
-					return nes
-				}
-			}
-		}
-	}
-	return nil
-}
-
-func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
-	for _, file := range desc.GetFile() {
-		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
-			continue
-		}
-		for _, msg := range file.GetMessageType() {
-			if msg.GetName() == typeName {
-				return file.GetSyntax() == "proto3"
-			}
-		}
-		for _, msg := range file.GetMessageType() {
-			for _, nes := range msg.GetNestedType() {
-				if nes.GetName() == typeName {
-					return file.GetSyntax() == "proto3"
-				}
-				if msg.GetName()+"."+nes.GetName() == typeName {
-					return file.GetSyntax() == "proto3"
-				}
-			}
-		}
-	}
-	return false
-}
-
-func (msg *DescriptorProto) IsExtendable() bool {
-	return len(msg.GetExtensionRange()) > 0
-}
-
-func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
-	parent := desc.GetMessage(packageName, typeName)
-	if parent == nil {
-		return "", nil
-	}
-	if !parent.IsExtendable() {
-		return "", nil
-	}
-	extendee := "." + packageName + "." + typeName
-	for _, file := range desc.GetFile() {
-		for _, ext := range file.GetExtension() {
-			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
-				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
-					continue
-				}
-			} else {
-				if ext.GetExtendee() != extendee {
-					continue
-				}
-			}
-			if ext.GetName() == fieldName {
-				return file.GetPackage(), ext
-			}
-		}
-	}
-	return "", nil
-}
-
-func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
-	parent := desc.GetMessage(packageName, typeName)
-	if parent == nil {
-		return "", nil
-	}
-	if !parent.IsExtendable() {
-		return "", nil
-	}
-	extendee := "." + packageName + "." + typeName
-	for _, file := range desc.GetFile() {
-		for _, ext := range file.GetExtension() {
-			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
-				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
-					continue
-				}
-			} else {
-				if ext.GetExtendee() != extendee {
-					continue
-				}
-			}
-			if ext.GetNumber() == fieldNum {
-				return file.GetPackage(), ext
-			}
-		}
-	}
-	return "", nil
-}
-
-func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
-	parent := desc.GetMessage(packageName, typeName)
-	if parent == nil {
-		return "", ""
-	}
-	field := parent.GetFieldDescriptor(fieldName)
-	if field == nil {
-		var extPackageName string
-		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
-		if field == nil {
-			return "", ""
-		}
-		packageName = extPackageName
-	}
-	typeNames := strings.Split(field.GetTypeName(), ".")
-	if len(typeNames) == 1 {
-		msg := desc.GetMessage(packageName, typeName)
-		if msg == nil {
-			return "", ""
-		}
-		return packageName, msg.GetName()
-	}
-	if len(typeNames) > 2 {
-		for i := 1; i < len(typeNames)-1; i++ {
-			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
-			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
-			msg := desc.GetMessage(packageName, typeName)
-			if msg != nil {
-				typeNames := strings.Split(msg.GetName(), ".")
-				if len(typeNames) == 1 {
-					return packageName, msg.GetName()
-				}
-				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
-			}
-		}
-	}
-	return "", ""
-}
-
-func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
-	for _, field := range msg.GetField() {
-		if field.GetName() == fieldName {
-			return field
-		}
-	}
-	return nil
-}
-
-func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
-	for _, file := range desc.GetFile() {
-		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
-			continue
-		}
-		for _, enum := range file.GetEnumType() {
-			if enum.GetName() == typeName {
-				return enum
-			}
-		}
-	}
-	return nil
-}
-
-func (f *FieldDescriptorProto) IsEnum() bool {
-	return *f.Type == FieldDescriptorProto_TYPE_ENUM
-}
-
-func (f *FieldDescriptorProto) IsMessage() bool {
-	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
-}
-
-func (f *FieldDescriptorProto) IsBytes() bool {
-	return *f.Type == FieldDescriptorProto_TYPE_BYTES
-}
-
-func (f *FieldDescriptorProto) IsRepeated() bool {
-	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
-}
-
-func (f *FieldDescriptorProto) IsString() bool {
-	return *f.Type == FieldDescriptorProto_TYPE_STRING
-}
-
-func (f *FieldDescriptorProto) IsBool() bool {
-	return *f.Type == FieldDescriptorProto_TYPE_BOOL
-}
-
-func (f *FieldDescriptorProto) IsRequired() bool {
-	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
-}
-
-func (f *FieldDescriptorProto) IsPacked() bool {
-	return f.Options != nil && f.GetOptions().GetPacked()
-}
-
-func (f *FieldDescriptorProto) IsPacked3() bool {
-	if f.IsRepeated() && f.IsScalar() {
-		if f.Options == nil || f.GetOptions().Packed == nil {
-			return true
-		}
-		return f.Options != nil && f.GetOptions().GetPacked()
-	}
-	return false
-}
-
-func (m *DescriptorProto) HasExtension() bool {
-	return len(m.ExtensionRange) > 0
-}

+ 16 - 0
vendor/google.golang.org/grpc/README.md

@@ -93,6 +93,22 @@ To build Go code, there are several options:
 
 #### Compiling error, undefined: grpc.SupportPackageIsVersion
 
+##### If you are using Go modules:
+
+Please ensure that the gRPC-Go dependency is `require`d at a high enough
+version in the same module that contains the generated `.pb.go` files.  For
+example, `SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
+
+```
+module <your module name>
+
+require (
+    google.golang.org/grpc v1.27.0
+)
+```
+
+##### If you are *not* using Go modules:
+
 Please update proto package, gRPC package and rebuild the proto files:
  - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
  - `go get -u google.golang.org/grpc`

+ 70 - 0
vendor/google.golang.org/grpc/attributes/attributes.go

@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// All APIs in this package are EXPERIMENTAL.
+package attributes
+
+import "fmt"
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs.  Keys must be hashable, and users should define their own
+// types for keys.
+type Attributes struct {
+	m map[interface{}]interface{}
+}
+
+// New returns a new Attributes containing all key/value pairs in kvs.  If the
+// same key appears multiple times, the last value overwrites all previous
+// values for that key.  Panics if len(kvs) is not even.
+func New(kvs ...interface{}) *Attributes {
+	if len(kvs)%2 != 0 {
+		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+	}
+	a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
+	for i := 0; i < len(kvs)/2; i++ {
+		a.m[kvs[i*2]] = kvs[i*2+1]
+	}
+	return a
+}
+
+// WithValues returns a new Attributes containing all key/value pairs in a and
+// kvs.  Panics if len(kvs) is not even.  If the same key appears multiple
+// times, the last value overwrites all previous values for that key.  To
+// remove an existing key, use a nil value.
+func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
+	if len(kvs)%2 != 0 {
+		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
+	}
+	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	for i := 0; i < len(kvs)/2; i++ {
+		n.m[kvs[i*2]] = kvs[i*2+1]
+	}
+	return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.
+func (a *Attributes) Value(key interface{}) interface{} {
+	return a.m[key]
+}
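
For orientation, a minimal sketch of how this new attributes API is meant to be consumed. The `connectionKey` type and the stored strings are illustrative assumptions for this example; the package only requires caller-defined, comparable key types.

```
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// connectionKey is an illustrative caller-defined key type; a private struct
// type avoids collisions with keys defined by other packages.
type connectionKey struct{}

func main() {
	// New builds an immutable set from alternating key/value pairs.
	a := attributes.New(connectionKey{}, "cluster-a")

	// WithValues returns a copy with extra (or overwritten) pairs; the
	// original Attributes value is untouched.
	b := a.WithValues(connectionKey{}, "cluster-b")

	fmt.Println(a.Value(connectionKey{})) // cluster-a
	fmt.Println(b.Value(connectionKey{})) // cluster-b
}
```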

+ 20 - 0
vendor/google.golang.org/grpc/backoff.go

@@ -23,16 +23,36 @@ package grpc
 
 import (
 	"time"
+
+	"google.golang.org/grpc/backoff"
 )
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
 var DefaultBackoffConfig = BackoffConfig{
 	MaxDelay: 120 * time.Second,
 }
 
 // BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
 type BackoffConfig struct {
 	// MaxDelay is the upper bound of backoff delay.
 	MaxDelay time.Duration
 }
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This API is EXPERIMENTAL.
+type ConnectParams struct {
+	// Backoff specifies the configuration options for connection backoff.
+	Backoff backoff.Config
+	// MinConnectTimeout is the minimum amount of time we are willing to give a
+	// connection to complete.
+	MinConnectTimeout time.Duration
+}
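
ConnectParams is the new option surface gRPC exposes for connection backoff. A hedged sketch of a caller passing it when dialing; it assumes the `grpc.WithConnectParams` DialOption that ships with gRPC-Go v1.27, and the target address is purely illustrative.

```
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and only shorten MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 10 * time.Second

	conn, err := grpc.Dial(
		"example.com:50051", // illustrative target
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```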

+ 52 - 0
vendor/google.golang.org/grpc/backoff/backoff.go

@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
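
The interaction of these fields is easier to see with numbers. Below is an illustrative re-statement of the documented strategy (exponential growth from BaseDelay by Multiplier, randomized by Jitter, capped at MaxDelay); it is a sketch for intuition only, not the vendored implementation.

```
package main

import (
	"fmt"
	"math/rand"
	"time"

	"google.golang.org/grpc/backoff"
)

// delayFor approximates the documented backoff strategy for a given number
// of consecutive connection failures. Illustrative only.
func delayFor(cfg backoff.Config, retries int) time.Duration {
	if retries == 0 {
		return cfg.BaseDelay
	}
	d := float64(cfg.BaseDelay)
	for i := 0; i < retries; i++ {
		d *= cfg.Multiplier
	}
	if limit := float64(cfg.MaxDelay); d > limit {
		d = limit
	}
	// Randomize by +/- Jitter around the computed delay.
	d *= 1 + cfg.Jitter*(2*rand.Float64()-1)
	return time.Duration(d)
}

func main() {
	for retries := 0; retries <= 5; retries++ {
		fmt.Printf("after %d failures: ~%v\n", retries,
			delayFor(backoff.DefaultConfig, retries).Round(time.Millisecond))
	}
}
```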

+ 105 - 15
vendor/google.golang.org/grpc/balancer/balancer.go

@@ -117,6 +117,15 @@ type NewSubConnOptions struct {
 	HealthCheckEnabled bool
 }
 
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker V2Picker
+}
+
 // ClientConn represents a gRPC ClientConn.
 //
 // This interface is to be implemented by gRPC. Users should not need a
@@ -137,10 +146,19 @@ type ClientConn interface {
 	//
 	// gRPC will update the connectivity state of the ClientConn, and will call pick
 	// on the new picker to pick new SubConn.
+	//
+	// Deprecated: use UpdateState instead
 	UpdateBalancerState(s connectivity.State, p Picker)
 
+	// UpdateState notifies gRPC that the balancer's internal state has
+	// changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call pick
+	// on the new picker to pick new SubConns.
+	UpdateState(State)
+
 	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
-	ResolveNow(resolver.ResolveNowOption)
+	ResolveNow(resolver.ResolveNowOptions)
 
 	// Target returns the dial target for this ClientConn.
 	//
@@ -185,11 +203,14 @@ type ConfigParser interface {
 	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
 }
 
-// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
 	// FullMethodName is the method name that NewClientStream() is called
 	// with. The canonical format is /service/Method.
 	FullMethodName string
+	// Ctx is the RPC's context, and may contain relevant RPC-level information
+	// like the outgoing header metadata.
+	Ctx context.Context
 }
 
 // DoneInfo contains additional information for done.
@@ -215,7 +236,7 @@ var (
 	ErrNoSubConnAvailable = errors.New("no SubConn is available")
 	// ErrTransientFailure indicates all SubConns are in TransientFailure.
 	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
-	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+	ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
 )
 
 // Picker is used by gRPC to pick a SubConn to send an RPC.
@@ -223,6 +244,8 @@ var (
 // internal state has changed.
 //
 // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+//
+// Deprecated: use V2Picker instead
 type Picker interface {
 	// Pick returns the SubConn to be used to send the RPC.
 	// The returned SubConn must be one returned by NewSubConn().
@@ -243,18 +266,76 @@ type Picker interface {
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
-	// - If the error is ErrTransientFailure:
+	// - If the error is ErrTransientFailure or implements IsTransientFailure()
+	//   bool, returning true:
 	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
 	//     is called to pick again;
 	//   - Otherwise, RPC will fail with unavailable error.
 	// - Else (error is other non-nil error):
-	//   - The RPC will fail with unavailable error.
+	//   - The RPC will fail with the error's status code, or Unknown if it is
+	//     not a status error.
 	//
 	// The returned done() function will be called once the rpc has finished,
 	// with the final status of that RPC.  If the SubConn returned is not a
 	// valid SubConn type, done may not be called.  done may be nil if balancer
 	// doesn't care about the RPC status.
-	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+	Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+	// SubConn is the connection to use for this pick, if its state is Ready.
+	// If the state is not Ready, gRPC will block the RPC until a new Picker is
+	// provided by the balancer (using ClientConn.UpdateState).  The SubConn
+	// must be one returned by ClientConn.NewSubConn.
+	SubConn SubConn
+
+	// Done is called when the RPC is completed.  If the SubConn is not ready,
+	// this will be called with a nil parameter.  If the SubConn is not a valid
+	// type, Done may not be called.  May be nil if the balancer does not wish
+	// to be notified when the RPC completes.
+	Done func(DoneInfo)
+}
+
+type transientFailureError struct {
+	error
+}
+
+func (e *transientFailureError) IsTransientFailure() bool { return true }
+
+// TransientFailureError wraps err in an error implementing
+// IsTransientFailure() bool, returning true.
+func TransientFailureError(err error) error {
+	return &transientFailureError{error: err}
+}
+
+// V2Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type V2Picker interface {
+	// Pick returns the connection to use for this RPC and related information.
+	//
+	// Pick should not block.  If the balancer needs to do I/O or any blocking
+	// or time-consuming work to service this call, it should return
+	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+	// the Picker is updated (using ClientConn.UpdateState).
+	//
+	// If an error is returned:
+	//
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+	//   Picker is provided by the balancer (using ClientConn.UpdateState).
+	//
+	// - If the error implements IsTransientFailure() bool, returning true,
+	//   wait for ready RPCs will wait, but non-wait for ready RPCs will be
+	//   terminated with this error's Error() string and status code
+	//   Unavailable.
+	//
+	// - Any other errors terminate all RPCs with the code and message
+	//   provided.  If the error is not a status error, it will be converted by
+	//   gRPC to a status error with code Unknown.
+	Pick(info PickInfo) (PickResult, error)
 }
 
 // Balancer takes input from gRPC, manages SubConns, and collects and aggregates
@@ -292,8 +373,11 @@ type Balancer interface {
 
 // SubConnState describes the state of a SubConn.
 type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
 	ConnectivityState connectivity.State
-	// TODO: add last connection error
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed.  Otherwise, it is nil.
+	ConnectionError error
 }
 
 // ClientConnState describes the state of a ClientConn relevant to the
@@ -305,14 +389,23 @@ type ClientConnState struct {
 	BalancerConfig serviceconfig.LoadBalancingConfig
 }
 
+// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
+// problem with the provided name resolver data.
+var ErrBadResolverState = errors.New("bad resolver state")
+
 // V2Balancer is defined for documentation purposes.  If a Balancer also
 // implements V2Balancer, its UpdateClientConnState method will be called
 // instead of HandleResolvedAddrs and its UpdateSubConnState will be called
 // instead of HandleSubConnStateChange.
 type V2Balancer interface {
 	// UpdateClientConnState is called by gRPC when the state of the ClientConn
-	// changes.
-	UpdateClientConnState(ClientConnState)
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
 	// UpdateSubConnState is called by gRPC when the state of a SubConn
 	// changes.
 	UpdateSubConnState(SubConn, SubConnState)
@@ -326,9 +419,8 @@ type V2Balancer interface {
 //
 // It's not thread safe.
 type ConnectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
+	numReady      uint64 // Number of addrConns in ready state.
+	numConnecting uint64 // Number of addrConns in connecting state.
 }
 
 // RecordTransition records state change happening in subConn and based on that
@@ -348,8 +440,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
 			cse.numReady += updateVal
 		case connectivity.Connecting:
 			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
 		}
 	}
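
The V2Picker contract introduced above replaces the context-based Pick with a PickInfo/PickResult pair and moves the blocking/failure semantics onto the returned error. A minimal sketch of a conforming picker; `fixedPicker` and its field are illustrative assumptions, not part of the vendored package.

```
// Package fixedpicker is an illustrative sketch of the balancer.V2Picker
// contract introduced in this change.
package fixedpicker

import (
	"google.golang.org/grpc/balancer"
)

type fixedPicker struct {
	sc balancer.SubConn // the single READY connection to hand out; may be nil
}

// Compile-time check that fixedPicker satisfies balancer.V2Picker.
var _ balancer.V2Picker = (*fixedPicker)(nil)

func (p *fixedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Block the RPC until the balancer pushes a new picker via
		// ClientConn.UpdateState. To fail only non-wait-for-ready RPCs
		// instead, return balancer.TransientFailureError(err) here.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{
		SubConn: p.sc,
		// Done is optional; gRPC calls it when the RPC finishes.
		Done: func(balancer.DoneInfo) {},
	}, nil
}
```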
 

+ 132 - 30
vendor/google.golang.org/grpc/balancer/base/balancer.go

@@ -20,6 +20,8 @@ package base
 
 import (
 	"context"
+	"errors"
+	"fmt"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
@@ -28,34 +30,44 @@ import (
 )
 
 type baseBuilder struct {
-	name          string
-	pickerBuilder PickerBuilder
-	config        Config
+	name            string
+	pickerBuilder   PickerBuilder
+	v2PickerBuilder V2PickerBuilder
+	config          Config
 }
 
 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &baseBalancer{
-		cc:            cc,
-		pickerBuilder: bb.pickerBuilder,
+	bal := &baseBalancer{
+		cc:              cc,
+		pickerBuilder:   bb.pickerBuilder,
+		v2PickerBuilder: bb.v2PickerBuilder,
 
 		subConns: make(map[resolver.Address]balancer.SubConn),
 		scStates: make(map[balancer.SubConn]connectivity.State),
 		csEvltr:  &balancer.ConnectivityStateEvaluator{},
-		// Initialize picker to a picker that always return
-		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
-		// may call UpdateBalancerState with this picker.
-		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
-		config: bb.config,
+		config:   bb.config,
 	}
+	// Initialize picker to a picker that always returns
+	// ErrNoSubConnAvailable, because when state of a SubConn changes, we
+	// may call UpdateState with this picker.
+	if bb.pickerBuilder != nil {
+		bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	} else {
+		bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
+	}
+	return bal
 }
 
 func (bb *baseBuilder) Name() string {
 	return bb.name
 }
 
+var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
+
 type baseBalancer struct {
-	cc            balancer.ClientConn
-	pickerBuilder PickerBuilder
+	cc              balancer.ClientConn
+	pickerBuilder   PickerBuilder
+	v2PickerBuilder V2PickerBuilder
 
 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
@@ -63,19 +75,46 @@ type baseBalancer struct {
 	subConns map[resolver.Address]balancer.SubConn
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
+	v2Picker balancer.V2Picker
 	config   Config
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
 }
 
 func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
 	panic("not implemented")
 }
 
-func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
+func (b *baseBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if len(b.subConns) == 0 {
+		b.state = connectivity.TransientFailure
+	}
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	if b.picker != nil {
+		b.cc.UpdateBalancerState(b.state, b.picker)
+	} else {
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.state,
+			Picker:            b.v2Picker,
+		})
+	}
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
 	// TODO: handle s.ResolverState.ServiceConfig?
 	if grpclog.V(2) {
 		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
 	}
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
 	for _, a := range s.ResolverState.Addresses {
@@ -101,26 +140,65 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
 			// The entry will be deleted in HandleSubConnStateChange.
 		}
 	}
+	// If resolver state contains no addresses, return an error so ClientConn
+	// will trigger re-resolve. Also records this as a resolver error, so when
+	// the overall state turns transient failure, the error message will have
+	// the zero address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error.  Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+	if b.connErr == nil {
+		return fmt.Errorf("last resolver error: %v", b.resolverErr)
+	}
+	if b.resolverErr == nil {
+		return fmt.Errorf("last connection error: %v", b.connErr)
+	}
+	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
 }
 
 // regeneratePicker takes a snapshot of the balancer, and generates a picker
 // from it. The picker is
-//  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
+//  - errPicker if the balancer is in TransientFailure,
 //  - built by the pickerBuilder with all READY SubConns otherwise.
 func (b *baseBalancer) regeneratePicker() {
 	if b.state == connectivity.TransientFailure {
-		b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		if b.pickerBuilder != nil {
+			b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		} else {
+			b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
+		}
 		return
 	}
-	readySCs := make(map[resolver.Address]balancer.SubConn)
+	if b.pickerBuilder != nil {
+		readySCs := make(map[resolver.Address]balancer.SubConn)
 
-	// Filter out all ready SCs from full subConn map.
-	for addr, sc := range b.subConns {
-		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-			readySCs[addr] = sc
+		// Filter out all ready SCs from full subConn map.
+		for addr, sc := range b.subConns {
+			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs[addr] = sc
+			}
+		}
+		b.picker = b.pickerBuilder.Build(readySCs)
+	} else {
+		readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+		// Filter out all ready SCs from full subConn map.
+		for addr, sc := range b.subConns {
+			if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs[sc] = SubConnInfo{Address: addr}
+			}
 		}
+		b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 	}
-	b.picker = b.pickerBuilder.Build(readySCs)
 }
 
 func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
@@ -139,6 +217,12 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
 		}
 		return
 	}
+	if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
+		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
+		// CONNECTING transitions to prevent the aggregated state from being
+		// always CONNECTING when many backends exist but are all down.
+		return
+	}
 	b.scStates[sc] = s
 	switch s {
 	case connectivity.Idle:
@@ -147,22 +231,27 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
 		// When an address was removed by resolver, b called RemoveSubConn but
 		// kept the sc's state in scStates. Remove state for this sc here.
 		delete(b.scStates, sc)
+	case connectivity.TransientFailure:
+		// Save error to be reported via picker.
+		b.connErr = state.ConnectionError
 	}
 
-	oldAggrState := b.state
 	b.state = b.csEvltr.RecordTransition(oldS, s)
 
 	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	//  - this sc entered or left ready
+	//  - the aggregated state of balancer is TransientFailure
+	//    (may need to update error message)
 	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
-		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		b.state == connectivity.TransientFailure {
 		b.regeneratePicker()
 	}
 
-	b.cc.UpdateBalancerState(b.state, b.picker)
+	if b.picker != nil {
+		b.cc.UpdateBalancerState(b.state, b.picker)
+	} else {
+		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
+	}
 }
 
 // Close is a nop because base balancer doesn't have internal state to clean up,
@@ -179,6 +268,19 @@ type errPicker struct {
 	err error // Pick() always returns this err.
 }
 
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	return nil, nil, p.err
 }
+
+// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
+func NewErrPickerV2(err error) balancer.V2Picker {
+	return &errPickerV2{err: err}
+}
+
+type errPickerV2 struct {
+	err error // Pick() always returns this err.
+}
+
+func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
+}

+ 29 - 0
vendor/google.golang.org/grpc/balancer/base/base.go

@@ -42,6 +42,26 @@ type PickerBuilder interface {
 	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
 }
 
+// V2PickerBuilder creates balancer.V2Picker.
+type V2PickerBuilder interface {
+	// Build returns a picker that will be used by gRPC to pick a SubConn.
+	Build(info PickerBuildInfo) balancer.V2Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+	// ReadySCs is a map from all ready SubConns to the Addresses used to
+	// create them.
+	ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+	Address resolver.Address // the address used to create this SubConn
+}
+
 // NewBalancerBuilder returns a balancer builder. The balancers
 // built by this builder will use the picker builder to build pickers.
 func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
@@ -62,3 +82,12 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
 		config:        config,
 	}
 }
+
+// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
+func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
+	return &baseBuilder{
+		name:            name,
+		v2PickerBuilder: pb,
+		config:          config,
+	}
+}
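
NewBalancerBuilderV2 is the entry point for wiring a custom V2PickerBuilder into the base balancer above. An illustrative sketch of a builder that always picks the first READY SubConn; the "first_ready" policy name and all type names are assumptions made for this example only.

```
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type firstReadyBuilder struct{}

// Build is called with every READY SubConn and returns the picker gRPC will
// use until the next state change.
func (firstReadyBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	var chosen balancer.SubConn
	for sc := range info.ReadySCs {
		chosen = sc
		break
	}
	return &firstReadyPicker{sc: chosen}
}

type firstReadyPicker struct {
	sc balancer.SubConn
}

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// Register under the illustrative policy name so a service config can
	// select it, mirroring what the roundrobin package below does.
	balancer.Register(base.NewBalancerBuilderV2("first_ready", firstReadyBuilder{}, base.Config{HealthCheck: true}))
}
```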

+ 8 - 10
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go

@@ -22,14 +22,12 @@
 package roundrobin
 
 import (
-	"context"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/grpcrand"
-	"google.golang.org/grpc/resolver"
 )
 
 // Name is the name of round_robin balancer.
@@ -37,7 +35,7 @@ const Name = "round_robin"
 
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+	return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }
 
 func init() {
@@ -46,13 +44,13 @@ func init() {
 
 type rrPickerBuilder struct{}
 
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
-	if len(readySCs) == 0 {
-		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
+	grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
 	}
 	var scs []balancer.SubConn
-	for _, sc := range readySCs {
+	for sc := range info.ReadySCs {
 		scs = append(scs, sc)
 	}
 	return &rrPicker{
@@ -74,10 +72,10 @@ type rrPicker struct {
 	next int
 }
 
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
 	p.mu.Lock()
 	sc := p.subConns[p.next]
 	p.next = (p.next + 1) % len(p.subConns)
 	p.mu.Unlock()
-	return sc, nil, nil
+	return balancer.PickResult{SubConn: sc}, nil
 }
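
On the client side, the rewritten round_robin policy is still selected the same way as before. A hedged sketch of opting into it via the default service config; the DNS target is illustrative, and `grpc.WithDefaultServiceConfig` is an experimental option in this gRPC version.

```
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin" (also imported by grpc itself)
)

func main() {
	// A resolver that returns multiple addresses (e.g. dns) is what makes
	// round robin meaningful.
	conn, err := grpc.Dial(
		"dns:///example.com:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```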

+ 60 - 107
vendor/google.golang.org/grpc/balancer_conn_wrappers.go

@@ -24,7 +24,9 @@ import (
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/buffer"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -32,64 +34,17 @@ import (
 type scStateUpdate struct {
 	sc    balancer.SubConn
 	state connectivity.State
-}
-
-// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
-// TODO make a general purpose buffer that uses interface{}.
-type scStateUpdateBuffer struct {
-	c       chan *scStateUpdate
-	mu      sync.Mutex
-	backlog []*scStateUpdate
-}
-
-func newSCStateUpdateBuffer() *scStateUpdateBuffer {
-	return &scStateUpdateBuffer{
-		c: make(chan *scStateUpdate, 1),
-	}
-}
-
-func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) == 0 {
-		select {
-		case b.c <- t:
-			return
-		default:
-		}
-	}
-	b.backlog = append(b.backlog, t)
-}
-
-func (b *scStateUpdateBuffer) load() {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	if len(b.backlog) > 0 {
-		select {
-		case b.c <- b.backlog[0]:
-			b.backlog[0] = nil
-			b.backlog = b.backlog[1:]
-		default:
-		}
-	}
-}
-
-// get returns the channel that the scStateUpdate will be sent to.
-//
-// Upon receiving, the caller should call load to send another
-// scStateChangeTuple onto the channel if there is any.
-func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
-	return b.c
+	err   error
 }
 
 // ccBalancerWrapper is a wrapper on top of cc for balancers.
 // It implements balancer.ClientConn interface.
 type ccBalancerWrapper struct {
-	cc               *ClientConn
-	balancer         balancer.Balancer
-	stateChangeQueue *scStateUpdateBuffer
-	ccUpdateCh       chan *balancer.ClientConnState
-	done             chan struct{}
+	cc         *ClientConn
+	balancerMu sync.Mutex // synchronizes calls to the balancer
+	balancer   balancer.Balancer
+	scBuffer   *buffer.Unbounded
+	done       *grpcsync.Event
 
 	mu       sync.Mutex
 	subConns map[*acBalancerWrapper]struct{}
@@ -97,11 +52,10 @@ type ccBalancerWrapper struct {
 
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
 	ccb := &ccBalancerWrapper{
-		cc:               cc,
-		stateChangeQueue: newSCStateUpdateBuffer(),
-		ccUpdateCh:       make(chan *balancer.ClientConnState, 1),
-		done:             make(chan struct{}),
-		subConns:         make(map[*acBalancerWrapper]struct{}),
+		cc:       cc,
+		scBuffer: buffer.NewUnbounded(),
+		done:     grpcsync.NewEvent(),
+		subConns: make(map[*acBalancerWrapper]struct{}),
 	}
 	go ccb.watcher()
 	ccb.balancer = b.Build(ccb, bopts)
@@ -113,36 +67,23 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 func (ccb *ccBalancerWrapper) watcher() {
 	for {
 		select {
-		case t := <-ccb.stateChangeQueue.get():
-			ccb.stateChangeQueue.load()
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
+		case t := <-ccb.scBuffer.Get():
+			ccb.scBuffer.Load()
+			if ccb.done.HasFired() {
+				break
 			}
+			ccb.balancerMu.Lock()
+			su := t.(*scStateUpdate)
 			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
+				ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
 			} else {
-				ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+				ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
 			}
-		case s := <-ccb.ccUpdateCh:
-			select {
-			case <-ccb.done:
-				ccb.balancer.Close()
-				return
-			default:
-			}
-			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateClientConnState(*s)
-			} else {
-				ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
-			}
-		case <-ccb.done:
+			ccb.balancerMu.Unlock()
+		case <-ccb.done.Done():
 		}
 
-		select {
-		case <-ccb.done:
+		if ccb.done.HasFired() {
 			ccb.balancer.Close()
 			ccb.mu.Lock()
 			scs := ccb.subConns
@@ -151,19 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() {
 			for acbw := range scs {
 				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 			}
-			ccb.UpdateBalancerState(connectivity.Connecting, nil)
+			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
 			return
-		default:
 		}
-		ccb.cc.firstResolveEvent.Fire()
 	}
 }
 
 func (ccb *ccBalancerWrapper) close() {
-	close(ccb.done)
+	ccb.done.Fire()
 }
 
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
 	// When updating addresses for a SubConn, if the address in use is not in
 	// the new addresses, the old ac will be tearDown() and a new ac will be
 	// created. tearDown() generates a state change with Shutdown state, we
@@ -174,30 +113,29 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
 	if sc == nil {
 		return
 	}
-	ccb.stateChangeQueue.put(&scStateUpdate{
+	ccb.scBuffer.Put(&scStateUpdate{
 		sc:    sc,
 		state: s,
+		err:   err,
 	})
 }
 
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
-	if ccb.cc.curBalancerName != grpclbName {
-		// Filter any grpclb addresses since we don't have the grpclb balancer.
-		s := &ccs.ResolverState
-		for i := 0; i < len(s.Addresses); {
-			if s.Addresses[i].Type == resolver.GRPCLB {
-				copy(s.Addresses[i:], s.Addresses[i+1:])
-				s.Addresses = s.Addresses[:len(s.Addresses)-1]
-				continue
-			}
-			i++
-		}
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.balancerMu.Lock()
+	defer ccb.balancerMu.Unlock()
+	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+		return ub.UpdateClientConnState(*ccs)
 	}
-	select {
-	case <-ccb.ccUpdateCh:
-	default:
+	ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
+	return nil
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+		ccb.balancerMu.Lock()
+		ub.ResolverError(err)
+		ccb.balancerMu.Unlock()
 	}
-	ccb.ccUpdateCh <- ccs
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -250,7 +188,22 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
 	ccb.cc.csMgr.updateState(s)
 }
 
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
+	// Update picker before updating state.  Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC.  If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
+	ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
 	ccb.cc.resolveNow(o)
 }
 
@@ -292,7 +245,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 
 		ac, err := cc.newAddrConn(addrs, opts)
 		if err != nil {
-			grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+			channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
 			return
 		}
 		acbw.ac = ac

+ 17 - 17
vendor/google.golang.org/grpc/balancer_v1_wrapper.go

@@ -19,7 +19,6 @@
 package grpc
 
 import (
-	"context"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -49,7 +48,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
 		csEvltr:    &balancer.ConnectivityStateEvaluator{},
 		state:      connectivity.Idle,
 	}
-	cc.UpdateBalancerState(connectivity.Idle, bw)
+	cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
 	go bw.lbWatcher()
 	return bw
 }
@@ -243,7 +242,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
 	if bw.state != sa {
 		bw.state = sa
 	}
-	bw.cc.UpdateBalancerState(bw.state, bw)
+	bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
 	if s == connectivity.Shutdown {
 		// Remove state for this sc.
 		delete(bw.connSt, sc)
@@ -275,17 +274,17 @@ func (bw *balancerWrapper) Close() {
 
 // The picker is the balancerWrapper itself.
 // It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
+func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
 	failfast := true // Default failfast is true.
-	if ss, ok := rpcInfoFromContext(ctx); ok {
+	if ss, ok := rpcInfoFromContext(info.Ctx); ok {
 		failfast = ss.failfast
 	}
-	a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+	a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
 	if err != nil {
-		return nil, nil, err
+		return balancer.PickResult{}, toRPCErr(err)
 	}
 	if p != nil {
-		done = func(balancer.DoneInfo) { p() }
+		result.Done = func(balancer.DoneInfo) { p() }
 		defer func() {
 			if err != nil {
 				p()
@@ -297,38 +296,39 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
 	defer bw.mu.Unlock()
 	if bw.pickfirst {
 		// Get the first sc in conns.
-		for _, sc := range bw.conns {
-			return sc, done, nil
+		for _, result.SubConn = range bw.conns {
+			return result, nil
 		}
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
-	sc, ok1 := bw.conns[resolver.Address{
+	var ok1 bool
+	result.SubConn, ok1 = bw.conns[resolver.Address{
 		Addr:       a.Addr,
 		Type:       resolver.Backend,
 		ServerName: "",
 		Metadata:   a.Metadata,
 	}]
-	s, ok2 := bw.connSt[sc]
+	s, ok2 := bw.connSt[result.SubConn]
 	if !ok1 || !ok2 {
 		// This can only happen due to a race where Get() returned an address
 		// that was subsequently removed by Notify.  In this case we should
 		// retry always.
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
 	switch s.s {
 	case connectivity.Ready, connectivity.Idle:
-		return sc, done, nil
+		return result, nil
 	case connectivity.Shutdown, connectivity.TransientFailure:
 		// If the returned sc has been shut down or is in transient failure,
 		// return error, and this RPC will fail or wait for another picker (if
 		// non-failfast).
-		return nil, nil, balancer.ErrTransientFailure
+		return balancer.PickResult{}, balancer.ErrTransientFailure
 	default:
 		// For other states (connecting or unknown), the v1 balancer would
 		// traditionally wait until ready and then issue the RPC.  Returning
 		// ErrNoSubConnAvailable will be a slight improvement in that it will
 		// allow the balancer to choose another address in case others are
 		// connected.
-		return nil, nil, balancer.ErrNoSubConnAvailable
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
 	}
 }

+ 235 - 149
vendor/google.golang.org/grpc/clientconn.go

@@ -31,21 +31,23 @@ import (
 	"time"
 
 	"google.golang.org/grpc/balancer"
-	_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
-	_ "google.golang.org/grpc/resolver/dns"         // To register dns resolver.
-	_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
 	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/status"
+
+	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
+	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver.
+	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
 )
 
 const (
@@ -149,7 +151,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if channelz.IsOn() {
 		if cc.dopts.channelzParentID != 0 {
 			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
+			channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
 				Desc:     "Channel Created",
 				Severity: channelz.CtINFO,
 				Parent: &channelz.TraceEventDesc{
@@ -159,10 +161,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 			})
 		} else {
 			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     "Channel Created",
-				Severity: channelz.CtINFO,
-			})
+			channelz.Info(cc.channelzID, "Channel Created")
 		}
 		cc.csMgr.channelzID = cc.channelzID
 	}
@@ -186,11 +185,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	}
 
 	if cc.dopts.defaultServiceConfigRawJSON != nil {
-		sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
-		if err != nil {
-			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
 		}
-		cc.dopts.defaultServiceConfig = sc
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
 	}
 	cc.mkp = cc.dopts.copts.KeepaliveParams
 
@@ -235,29 +234,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		}
 	}
 	if cc.dopts.bs == nil {
-		cc.dopts.bs = backoff.Exponential{
-			MaxDelay: DefaultBackoffConfig.MaxDelay,
+		cc.dopts.bs = backoff.DefaultExponential
+	}
+
+	// Determine the resolver to use.
+	cc.parsedTarget = grpcutil.ParseTarget(cc.target)
+	channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
+	resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
+	if resolverBuilder == nil {
+		// If resolver builder is still nil, the parsed target's scheme is
+		// not registered. Fallback to default resolver and set Endpoint to
+		// the original target.
+		channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+		cc.parsedTarget = resolver.Target{
+			Scheme:   resolver.GetDefaultScheme(),
+			Endpoint: target,
 		}
-	}
-	if cc.dopts.resolverBuilder == nil {
-		// Only try to parse target when resolver builder is not already set.
-		cc.parsedTarget = parseTarget(cc.target)
-		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
-		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
-		if cc.dopts.resolverBuilder == nil {
-			// If resolver builder is still nil, the parsed target's scheme is
-			// not registered. Fallback to default resolver and set Endpoint to
-			// the original target.
-			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
-			cc.parsedTarget = resolver.Target{
-				Scheme:   resolver.GetDefaultScheme(),
-				Endpoint: target,
-			}
-			cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+		resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
+		if resolverBuilder == nil {
+			return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
 		}
-	} else {
-		cc.parsedTarget = resolver.Target{Endpoint: target}
 	}
+
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
 		cc.authority = creds.Info().ServerName
@@ -297,14 +295,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	}
 
 	// Build the resolver.
-	rWrapper, err := newCCResolverWrapper(cc)
+	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
-
 	cc.mu.Lock()
 	cc.resolverWrapper = rWrapper
 	cc.mu.Unlock()
+
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
 		for {
@@ -415,12 +413,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
 		return
 	}
 	csm.state = state
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
-			Desc:     fmt.Sprintf("Channel Connectivity change to %v", state),
-			Severity: channelz.CtINFO,
-		})
-	}
+	channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state)
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.
 		close(csm.notifyChan)
@@ -443,7 +436,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
 	return csm.notifyChan
 }
 
-// ClientConn represents a client connection to an RPC server.
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs.  It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+	// Invoke performs a unary RPC and returns after the response is received
+	// into reply.
+	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+	// NewStream begins a streaming RPC.
+	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the endpoint
+// based on configuration, load, etc. It is also free to determine which actual
+// endpoints to use and may change it every RPC, permitting client-side load
+// balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
 type ClientConn struct {
 	ctx    context.Context
 	cancel context.CancelFunc
@@ -532,58 +550,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
 	}
 }
 
-func (cc *ClientConn) updateResolverState(s resolver.State) error {
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+	cfg := parseServiceConfig("{}")
+	if cfg.Err != nil {
+		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+	}
+	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+	if cc.sc != nil {
+		cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+		return
+	}
+	if cc.dopts.defaultServiceConfig != nil {
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+	} else {
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+	}
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+	defer cc.firstResolveEvent.Fire()
 	cc.mu.Lock()
-	defer cc.mu.Unlock()
 	// Check if the ClientConn is already closed. Some fields (e.g.
 	// balancerWrapper) are set to nil when closing the ClientConn, and could
 	// cause nil pointer panic if we don't have this check.
 	if cc.conns == nil {
+		cc.mu.Unlock()
 		return nil
 	}
 
-	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
-		if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
-			cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
+	if err != nil {
+		// May need to apply the initial service config in case the resolver
+		// doesn't support service configs, or doesn't provide a service config
+		// with the new addresses.
+		cc.maybeApplyDefaultServiceConfig(nil)
+
+		if cc.balancerWrapper != nil {
+			cc.balancerWrapper.resolverError(err)
 		}
-	} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
-		cc.applyServiceConfig(sc)
+
+		// No addresses are valid with err set; return early.
+		cc.mu.Unlock()
+		return balancer.ErrBadResolverState
 	}
 
-	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var newBalancerName string
-		if cc.sc != nil && cc.sc.lbConfig != nil {
-			newBalancerName = cc.sc.lbConfig.name
-			balCfg = cc.sc.lbConfig.cfg
+	var ret error
+	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+		// TODO: do we need to apply a failing LB policy if there is no
+		// default, per the error handling design?
+	} else {
+		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+			cc.applyServiceConfigAndBalancer(sc, s.Addresses)
 		} else {
-			var isGRPCLB bool
-			for _, a := range s.Addresses {
-				if a.Type == resolver.GRPCLB {
-					isGRPCLB = true
-					break
+			ret = balancer.ErrBadResolverState
+			if cc.balancerWrapper == nil {
+				var err error
+				if s.ServiceConfig.Err != nil {
+					err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
+				} else {
+					err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
 				}
-			}
-			if isGRPCLB {
-				newBalancerName = grpclbName
-			} else if cc.sc != nil && cc.sc.LB != nil {
-				newBalancerName = *cc.sc.LB
-			} else {
-				newBalancerName = PickFirstBalancerName
+				cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+				cc.csMgr.updateState(connectivity.TransientFailure)
+				cc.mu.Unlock()
+				return ret
 			}
 		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
 	}
 
-	cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
-	return nil
+	var balCfg serviceconfig.LoadBalancingConfig
+	if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+		balCfg = cc.sc.lbConfig.cfg
+	}
+
+	cbn := cc.curBalancerName
+	bw := cc.balancerWrapper
+	cc.mu.Unlock()
+	if cbn != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		for i := 0; i < len(s.Addresses); {
+			if s.Addresses[i].Type == resolver.GRPCLB {
+				copy(s.Addresses[i:], s.Addresses[i+1:])
+				s.Addresses = s.Addresses[:len(s.Addresses)-1]
+				continue
+			}
+			i++
+		}
+	}
+	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+	if ret == nil {
+		ret = uccsErr // prefer ErrBadResolver state since any other error is
+		// currently meaningless to the caller.
+	}
+	return ret
 }
 
 // switchBalancer starts the switching from current balancer to the balancer
@@ -599,9 +663,9 @@ func (cc *ClientConn) switchBalancer(name string) {
 		return
 	}
 
-	grpclog.Infof("ClientConn switching balancer to %q", name)
+	channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name)
 	if cc.dopts.balancerBuilder != nil {
-		grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
+		channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
 		return
 	}
 	if cc.balancerWrapper != nil {
@@ -609,29 +673,19 @@ func (cc *ClientConn) switchBalancer(name string) {
 	}
 
 	builder := balancer.Get(name)
-	if channelz.IsOn() {
-		if builder == nil {
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
-				Severity: channelz.CtWarning,
-			})
-		} else {
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Channel switches to new LB policy %q", name),
-				Severity: channelz.CtINFO,
-			})
-		}
-	}
 	if builder == nil {
-		grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
+		channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
+		channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
 		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name)
 	}
 
 	cc.curBalancerName = builder.Name()
 	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
 }
 
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
 	cc.mu.Lock()
 	if cc.conns == nil {
 		cc.mu.Unlock()
@@ -639,7 +693,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
 	}
 	// TODO(bar switching) send updates to all balancer wrappers when balancer
 	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s)
+	cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
 	cc.mu.Unlock()
 }
 
@@ -648,6 +702,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
 // Caller needs to make sure len(addrs) > 0.
 func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
 	ac := &addrConn{
+		state:        connectivity.Idle,
 		cc:           cc,
 		addrs:        addrs,
 		scopts:       opts,
@@ -664,7 +719,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
 	}
 	if channelz.IsOn() {
 		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+		channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
 			Desc:     "Subchannel Created",
 			Severity: channelz.CtINFO,
 			Parent: &channelz.TraceEventDesc{
@@ -736,7 +791,7 @@ func (ac *addrConn) connect() error {
 	}
 	// Update connectivity state within the lock to prevent subsequent or
 	// concurrent calls from resetting the transport more than once.
-	ac.updateConnectivityState(connectivity.Connecting)
+	ac.updateConnectivityState(connectivity.Connecting, nil)
 	ac.mu.Unlock()
 
 	// Start a goroutine connecting to the server asynchronously.
@@ -762,7 +817,7 @@ func (ac *addrConn) connect() error {
 func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 	ac.mu.Lock()
 	defer ac.mu.Unlock()
-	grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
 	if ac.state == connectivity.Shutdown ||
 		ac.state == connectivity.TransientFailure ||
 		ac.state == connectivity.Idle {
@@ -782,7 +837,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 			break
 		}
 	}
-	grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+	channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
 	if curAddrFound {
 		ac.addrs = addrs
 	}
@@ -822,7 +877,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
 }
 
 func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+		Ctx:            ctx,
 		FullMethodName: method,
 	})
 	if err != nil {
@@ -831,10 +887,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
 	return t, done, nil
 }
 
-func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
 	if sc == nil {
 		// should never reach here.
-		return fmt.Errorf("got nil pointer for service config")
+		return
 	}
 	cc.sc = sc
 
@@ -850,10 +906,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
 
-	return nil
+	if cc.dopts.balancerBuilder == nil {
+		// Only look at balancer types and switch balancer if balancer dial
+		// option is not set.
+		var newBalancerName string
+		if cc.sc != nil && cc.sc.lbConfig != nil {
+			newBalancerName = cc.sc.lbConfig.name
+		} else {
+			var isGRPCLB bool
+			for _, a := range addrs {
+				if a.Type == resolver.GRPCLB {
+					isGRPCLB = true
+					break
+				}
+			}
+			if isGRPCLB {
+				newBalancerName = grpclbName
+			} else if cc.sc != nil && cc.sc.LB != nil {
+				newBalancerName = *cc.sc.LB
+			} else {
+				newBalancerName = PickFirstBalancerName
+			}
+		}
+		cc.switchBalancer(newBalancerName)
+	} else if cc.balancerWrapper == nil {
+		// Balancer dial option was set, and this is the first time handling
+		// resolved addresses. Build a balancer with dopts.balancerBuilder.
+		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
+		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+	}
 }
 
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
 	cc.mu.RLock()
 	r := cc.resolverWrapper
 	cc.mu.RUnlock()
@@ -875,8 +959,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
 // This API is EXPERIMENTAL.
 func (cc *ClientConn) ResetConnectBackoff() {
 	cc.mu.Lock()
-	defer cc.mu.Unlock()
-	for ac := range cc.conns {
+	conns := cc.conns
+	cc.mu.Unlock()
+	for ac := range conns {
 		ac.resetConnectBackoff()
 	}
 }
@@ -923,7 +1008,7 @@ func (cc *ClientConn) Close() error {
 				Severity: channelz.CtINFO,
 			}
 		}
-		channelz.AddTraceEvent(cc.channelzID, ted)
+		channelz.AddTraceEvent(cc.channelzID, 0, ted)
 		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
 		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(cc.channelzID)
@@ -962,20 +1047,13 @@ type addrConn struct {
 }
 
 // Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
 	if ac.state == s {
 		return
 	}
-
-	updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
 	ac.state = s
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-			Desc:     updateMsg,
-			Severity: channelz.CtINFO,
-		})
-	}
-	ac.cc.handleSubConnStateChange(ac.acbw, s)
+	channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s)
+	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
 }
 
 // adjustParams updates parameters used to create transports upon
@@ -995,7 +1073,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 func (ac *addrConn) resetTransport() {
 	for i := 0; ; i++ {
 		if i > 0 {
-			ac.cc.resolveNow(resolver.ResolveNowOption{})
+			ac.cc.resolveNow(resolver.ResolveNowOptions{})
 		}
 
 		ac.mu.Lock()
@@ -1024,7 +1102,7 @@ func (ac *addrConn) resetTransport() {
 		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
 		connectDeadline := time.Now().Add(dialDuration)
 
-		ac.updateConnectivityState(connectivity.Connecting)
+		ac.updateConnectivityState(connectivity.Connecting, nil)
 		ac.transport = nil
 		ac.mu.Unlock()
 
@@ -1037,7 +1115,7 @@ func (ac *addrConn) resetTransport() {
 				ac.mu.Unlock()
 				return
 			}
-			ac.updateConnectivityState(connectivity.TransientFailure)
+			ac.updateConnectivityState(connectivity.TransientFailure, err)
 
 			// Backoff.
 			b := ac.resetBackoff
@@ -1093,6 +1171,7 @@ func (ac *addrConn) resetTransport() {
 // first successful one. It returns the transport, the address and an Event in
 // the successful case. The Event fires when the returned transport disconnects.
 func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+	var firstConnErr error
 	for _, addr := range addrs {
 		ac.mu.Lock()
 		if ac.state == connectivity.Shutdown {
@@ -1110,22 +1189,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 		}
 		ac.mu.Unlock()
 
-		if channelz.IsOn() {
-			channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
-				Severity: channelz.CtINFO,
-			})
-		}
+		channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
 
 		newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
 		if err == nil {
 			return newTr, addr, reconnect, nil
 		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
 		ac.cc.blockingpicker.updateConnectionError(err)
 	}
 
 	// Couldn't connect to any address.
-	return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+	return nil, resolver.Address{}, nil, firstConnErr
 }
 
 // createTransport creates a connection to addr. It returns the transport and a
@@ -1136,10 +1213,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 	onCloseCalled := make(chan struct{})
 	reconnect := grpcsync.NewEvent()
 
+	authority := ac.cc.authority
+	// addr.ServerName takes precedence over ClientConn authority, if present.
+	if addr.ServerName != "" {
+		authority = addr.ServerName
+	}
+
 	target := transport.TargetInfo{
 		Addr:      addr.Addr,
 		Metadata:  addr.Metadata,
-		Authority: ac.cc.authority,
+		Authority: authority,
 	}
 
 	once := sync.Once{}
@@ -1152,7 +1235,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 				// state to Connecting.
 				//
 				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting)
+				ac.updateConnectivityState(connectivity.Connecting, nil)
 			}
 		})
 		ac.mu.Unlock()
@@ -1167,7 +1250,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 				// state to Connecting.
 				//
 				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting)
+				ac.updateConnectivityState(connectivity.Connecting, nil)
 			}
 		})
 		ac.mu.Unlock()
@@ -1188,15 +1271,15 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
 	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
 	if err != nil {
 		// newTr is either nil, or closed.
-		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+		channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
 		return nil, nil, err
 	}
 
 	select {
-	case <-time.After(connectDeadline.Sub(time.Now())):
+	case <-time.After(time.Until(connectDeadline)):
 		// We didn't get the preface in time.
 		newTr.Close()
-		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+		channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
 		return nil, nil, errors.New("timed out waiting for server handshake")
 	case <-prefaceReceived:
 		// We got the preface - huzzah! things are good.
@@ -1224,7 +1307,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 	var healthcheckManagingState bool
 	defer func() {
 		if !healthcheckManagingState {
-			ac.updateConnectivityState(connectivity.Ready)
+			ac.updateConnectivityState(connectivity.Ready, nil)
 		}
 	}()
 
@@ -1243,7 +1326,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 		// The health package is not imported to set health check function.
 		//
 		// TODO: add a link to the health check doc in the error message.
-		grpclog.Error("Health check is requested but health check function is not set.")
+		channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
 		return
 	}
 
@@ -1260,28 +1343,22 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 		ac.mu.Unlock()
 		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
 	}
-	setConnectivityState := func(s connectivity.State) {
+	setConnectivityState := func(s connectivity.State, lastErr error) {
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
 		if ac.transport != currentTr {
 			return
 		}
-		ac.updateConnectivityState(s)
+		ac.updateConnectivityState(s, lastErr)
 	}
 	// Start the health checking stream.
 	go func() {
 		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
 		if err != nil {
 			if status.Code(err) == codes.Unimplemented {
-				if channelz.IsOn() {
-					channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-						Desc:     "Subchannel health check is unimplemented at server side, thus health check is disabled",
-						Severity: channelz.CtError,
-					})
-				}
-				grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
+				channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
 			} else {
-				grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
+				channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
 			}
 		}
 	}()
@@ -1331,8 +1408,8 @@ func (ac *addrConn) tearDown(err error) {
 	curTr := ac.transport
 	ac.transport = nil
 	// We have to set the state to Shutdown before anything else to prevent races
-	// between setting the state and logic that waits on context cancelation / etc.
-	ac.updateConnectivityState(connectivity.Shutdown)
+	// between setting the state and logic that waits on context cancellation / etc.
+	ac.updateConnectivityState(connectivity.Shutdown, nil)
 	ac.cancel()
 	ac.curAddr = resolver.Address{}
 	if err == errConnDrain && curTr != nil {
@@ -1346,7 +1423,7 @@ func (ac *addrConn) tearDown(err error) {
 		ac.mu.Lock()
 	}
 	if channelz.IsOn() {
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
+		channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
 			Desc:     "Subchannel Deleted",
 			Severity: channelz.CtINFO,
 			Parent: &channelz.TraceEventDesc{
@@ -1355,7 +1432,7 @@ func (ac *addrConn) tearDown(err error) {
 			},
 		})
 		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity beng deleted, and thus prevent it from being deleted right away.
+		// the entity being deleted, and thus prevent it from being deleted right away.
 		channelz.RemoveEntry(ac.channelzID)
 	}
 	ac.mu.Unlock()
@@ -1445,3 +1522,12 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
 // Deprecated: This error is never returned by grpc and should not be
 // referenced by users.
 var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+	for _, rb := range cc.dopts.resolvers {
+		if cc.parsedTarget.Scheme == rb.Scheme() {
+			return rb
+		}
+	}
+	return resolver.Get(cc.parsedTarget.Scheme)
+}
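
The refactored ResetConnectBackoff above now copies the connection set out of the lock before iterating, so applications can call it while the channel is reconnecting. A minimal caller-side sketch; the target address and the insecure dial option are placeholders, not part of this change:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// "example.com:50051" is a placeholder target.
	conn, err := grpc.Dial("example.com:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// EXPERIMENTAL API: asks every subchannel to drop its current backoff
	// timer and retry connecting immediately.
	conn.ResetConnectBackoff()
}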

+ 100 - 181
vendor/google.golang.org/grpc/credentials/credentials.go

@@ -24,16 +24,12 @@ package credentials // import "google.golang.org/grpc/credentials"
 
 import (
 	"context"
-	"crypto/tls"
-	"crypto/x509"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"net"
-	"strings"
 
 	"github.com/golang/protobuf/proto"
-	"google.golang.org/grpc/credentials/internal"
+	"google.golang.org/grpc/internal"
 )
 
 // PerRPCCredentials defines the common interface for the credentials which need to
@@ -45,7 +41,8 @@ type PerRPCCredentials interface {
 	// context. If a status code is returned, it will be used as the status
 	// for the RPC. uri is the URI of the entry point for the request.
 	// When supported by the underlying implementation, ctx can be used for
-	// timeout and cancellation.
+	// timeout and cancellation. Additionally, RequestInfo data will be
+	// available via ctx to this call.
 	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
 	// it as an arbitrary string.
 	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
@@ -54,6 +51,48 @@ type PerRPCCredentials interface {
 	RequireTransportSecurity() bool
 }
 
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+	// NoSecurity indicates a connection is insecure.
+	// The zero SecurityLevel value is invalid for backward compatibility.
+	NoSecurity SecurityLevel = iota + 1
+	// IntegrityOnly indicates a connection only provides integrity protection.
+	IntegrityOnly
+	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+	PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+	switch s {
+	case NoSecurity:
+		return "NoSecurity"
+	case IntegrityOnly:
+		return "IntegrityOnly"
+	case PrivacyAndIntegrity:
+		return "PrivacyAndIntegrity"
+	}
+	return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+	SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
+func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
+	return c
+}
+
 // ProtocolInfo provides information regarding the gRPC wire protocol version,
 // security protocol, security protocol version in use, server name, etc.
 type ProtocolInfo struct {
@@ -61,13 +100,19 @@ type ProtocolInfo struct {
 	ProtocolVersion string
 	// SecurityProtocol is the security protocol in use.
 	SecurityProtocol string
-	// SecurityVersion is the security protocol version.
+	// SecurityVersion is the security protocol version.  It is a static version string from the
+	// credentials, not a value that reflects per-connection protocol negotiation.  To retrieve
+	// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
+	//
+	// Deprecated: please use Peer.AuthInfo.
 	SecurityVersion string
 	// ServerName is the user-configured server name.
 	ServerName string
 }
 
 // AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
+// information about the credentials in it.
 type AuthInfo interface {
 	AuthType() string
 }
@@ -82,7 +127,8 @@ type TransportCredentials interface {
 	// ClientHandshake does the authentication handshake specified by the corresponding
 	// authentication protocol on rawConn for clients. It returns the authenticated
 	// connection and the corresponding auth information about the connection.
-	// Implementations must use the provided context to implement timely cancellation.
+	// The auth information should embed CommonAuthInfo to return additional information about
+	// the credentials. Implementations must use the provided context to implement timely cancellation.
 	// gRPC will try to reconnect if the error returned is a temporary error
 	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
 	// If the returned error is a wrapper error, implementations should make sure that
@@ -92,7 +138,8 @@ type TransportCredentials interface {
 	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
 	// ServerHandshake does the authentication handshake for servers. It returns
 	// the authenticated connection and the corresponding auth information about
-	// the connection.
+	// the connection. The auth information should embed CommonAuthInfo to return additional information
+	// about the credentials.
 	//
 	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
 	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
@@ -125,145 +172,63 @@ type Bundle interface {
 	NewWithMode(mode string) (Bundle, error)
 }
 
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
-	State tls.ConnectionState
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
-	return "tls"
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+	// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+	AuthInfo AuthInfo
 }
 
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
-	v := &TLSChannelzSecurityValue{
-		StandardName: cipherSuiteLookup[t.State.CipherSuite],
-	}
-	// Currently there's no way to get LocalCertificate info from tls package.
-	if len(t.State.PeerCertificates) > 0 {
-		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
-	}
-	return v
-}
+// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+type requestInfoKey struct{}
 
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
-	// TLS configuration
-	config *tls.Config
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
+	return
 }
 
-func (c tlsCreds) Info() ProtocolInfo {
-	return ProtocolInfo{
-		SecurityProtocol: "tls",
-		SecurityVersion:  "1.2",
-		ServerName:       c.config.ServerName,
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() *CommonAuthInfo
 	}
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
-	// use local cfg to avoid clobbering ServerName if using multiple endpoints
-	cfg := cloneTLSConfig(c.config)
-	if cfg.ServerName == "" {
-		colonPos := strings.LastIndex(authority, ":")
-		if colonPos == -1 {
-			colonPos = len(authority)
-		}
-		cfg.ServerName = authority[:colonPos]
+	ri, _ := RequestInfoFromContext(ctx)
+	if ri.AuthInfo == nil {
+		return errors.New("unable to obtain SecurityLevel from context")
 	}
-	conn := tls.Client(rawConn, cfg)
-	errChannel := make(chan error, 1)
-	go func() {
-		errChannel <- conn.Handshake()
-	}()
-	select {
-	case err := <-errChannel:
-		if err != nil {
-			return nil, nil, err
+	if ci, ok := ri.AuthInfo.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == 0 {
+			return nil
 		}
-	case <-ctx.Done():
-		return nil, nil, ctx.Err()
-	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
-	conn := tls.Server(rawConn, c.config)
-	if err := conn.Handshake(); err != nil {
-		return nil, nil, err
-	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
-	return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
-	c.config.ServerName = serverNameOverride
-	return nil
-}
-
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
-	for _, p := range ps {
-		if p == alpnProtoStrH2 {
-			return ps
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
 		}
 	}
-	ret := make([]string, 0, len(ps)+1)
-	ret = append(ret, ps...)
-	return append(ret, alpnProtoStrH2)
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
-	tc := &tlsCreds{cloneTLSConfig(c)}
-	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
-	return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
-	b, err := ioutil.ReadFile(certFile)
-	if err != nil {
-		return nil, err
-	}
-	cp := x509.NewCertPool()
-	if !cp.AppendCertsFromPEM(b) {
-		return nil, fmt.Errorf("credentials: failed to append certificates")
-	}
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
 }
 
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
-	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
-	if err != nil {
-		return nil, err
+func init() {
+	internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
+		return context.WithValue(ctx, requestInfoKey{}, ri)
 	}
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
 }
 
 // ChannelzSecurityInfo defines the interface that security protocols should implement
 // in order to provide security info to channelz.
+//
+// This API is experimental.
 type ChannelzSecurityInfo interface {
 	GetSecurityValue() ChannelzSecurityValue
 }
@@ -271,66 +236,20 @@ type ChannelzSecurityInfo interface {
 // ChannelzSecurityValue defines the interface that GetSecurityValue() return value
 // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
 // and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
 type ChannelzSecurityValue interface {
 	isChannelzSecurityValue()
 }
 
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
-	ChannelzSecurityValue
-	StandardName      string
-	LocalCertificate  []byte
-	RemoteCertificate []byte
-}
-
 // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
 // from GetSecurityValue(), which contains protocol specific security info. Note
 // the Value field will be sent to users of channelz requesting channel info, and
 // thus sensitive info should better be avoided.
+//
+// This API is experimental.
 type OtherChannelzSecurityValue struct {
 	ChannelzSecurityValue
 	Name  string
 	Value proto.Message
 }
-
-var cipherSuiteLookup = map[uint16]string{
-	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
-	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
-	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-
-	return cfg.Clone()
-}
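
The RequestInfo plumbing added above exposes the RPC method and handshake AuthInfo to per-RPC credentials through the call context. A hedged sketch of a token-based PerRPCCredentials implementation that reads it; the package, type name, header, and token value are illustrative only:

package tokencreds

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// tokenCreds attaches a bearer token to every RPC.
type tokenCreds struct {
	token string
}

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// RequestInfo is attached to ctx by the client before this call is made.
	if ri, ok := credentials.RequestInfoFromContext(ctx); ok {
		_ = ri.Method // e.g. "/some.Service/Method"; could be used to scope the token.
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c tokenCreds) RequireTransportSecurity() bool { return true }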

+ 0 - 0
vendor/google.golang.org/grpc/credentials/tls13.go → vendor/google.golang.org/grpc/credentials/go12.go


+ 225 - 0
vendor/google.golang.org/grpc/credentials/tls.go

@@ -0,0 +1,225 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+
+	"google.golang.org/grpc/credentials/internal"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := cloneTLSConfig(c.config)
+	if cfg.ServerName == "" {
+		serverName, _, err := net.SplitHostPort(authority)
+		if err != nil {
+			// If the authority had no host port or if the authority cannot be parsed, use it as-is.
+			serverName = authority
+		}
+		cfg.ServerName = serverName
+	}
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+		close(errChannel)
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			conn.Close()
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		conn.Close()
+		return nil, nil, ctx.Err()
+	}
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		conn.Close()
+		return nil, nil, err
+	}
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+const alpnProtoStrH2 = "h2"
+
+func appendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	tc := &tlsCreds{cloneTLSConfig(c)}
+	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
+// serverNameOverride is for testing only. If set to a non empty string,
+// it will override the virtual host name of authority (e.g. :authority header field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// This API is EXPERIMENTAL.
+type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
+
+var cipherSuiteLookup = map[uint16]string{
+	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
+	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+}
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
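
The TLS constructors moved into this file keep their previous signatures; the behavioral changes are that TLSInfo now carries CommonAuthInfo with PrivacyAndIntegrity and that handshake failures close the connection. A client-side sketch, assuming a placeholder CA bundle path and target:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// "ca.pem" and the target below are placeholders.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("load CA: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}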

+ 61 - 21
vendor/google.golang.org/grpc/dialoptions.go

@@ -24,11 +24,12 @@ import (
 	"net"
 	"time"
 
+	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
-	"google.golang.org/grpc/internal/backoff"
+	internalbackoff "google.golang.org/grpc/internal/backoff"
 	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
@@ -47,7 +48,7 @@ type dialOptions struct {
 
 	cp          Compressor
 	dc          Decompressor
-	bs          backoff.Strategy
+	bs          internalbackoff.Strategy
 	block       bool
 	insecure    bool
 	timeout     time.Duration
@@ -57,9 +58,7 @@ type dialOptions struct {
 	callOptions []CallOption
 	// This is used by v1 balancer dial option WithBalancer to support v1
 	// balancer, and also by WithBalancerName dial option.
-	balancerBuilder balancer.Builder
-	// This is to support grpclb.
-	resolverBuilder             resolver.Builder
+	balancerBuilder             balancer.Builder
 	channelzParentID            int64
 	disableServiceConfig        bool
 	disableRetry                bool
@@ -68,6 +67,11 @@ type dialOptions struct {
 	minConnectTimeout           func() time.Duration
 	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
+	// This is used by ccResolverWrapper to backoff between successive calls to
+	// resolver.ResolveNow(). The user will have no need to configure this, but
+	// we need to be able to configure this in tests.
+	resolveNowBackoff func(int) time.Duration
+	resolvers         []resolver.Builder
 }
 
 // DialOption configures how we set up the connection.
@@ -226,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption {
 	})
 }
 
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.resolverBuilder = b
-	})
-}
-
 // WithServiceConfig returns a DialOption which has a channel to read the
 // service configuration.
 //
@@ -246,8 +243,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	})
 }
 
+// WithConnectParams configures the dialer to use the provided ConnectParams.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+//
+// This API is EXPERIMENTAL.
+func WithConnectParams(p ConnectParams) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = internalbackoff.Exponential{Config: p.Backoff}
+		o.minConnectTimeout = func() time.Duration {
+			return p.MinConnectTimeout
+		}
+	})
+}
+
 // WithBackoffMaxDelay configures the dialer to use the provided maximum delay
 // when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
 func WithBackoffMaxDelay(md time.Duration) DialOption {
 	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
 }
@@ -255,19 +272,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption {
 // WithBackoffConfig configures the dialer to use the provided backoff
 // parameters after connection failures.
 //
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
 func WithBackoffConfig(b BackoffConfig) DialOption {
-	return withBackoff(backoff.Exponential{
-		MaxDelay: b.MaxDelay,
-	})
+	bc := backoff.DefaultConfig
+	bc.MaxDelay = b.MaxDelay
+	return withBackoff(internalbackoff.Exponential{Config: bc})
 }
 
 // withBackoff sets the backoff strategy used for connectRetryNum after a failed
 // connection attempt.
 //
 // This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoff.Strategy) DialOption {
+func withBackoff(bs internalbackoff.Strategy) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.bs = bs
 	})
@@ -322,8 +338,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
 // WithTimeout returns a DialOption that configures a timeout for dialing a
 // ClientConn initially. This is valid if and only if WithBlock() is present.
 //
-// Deprecated: use DialContext and context.WithTimeout instead.  Will be
-// supported throughout 1.x.
+// Deprecated: use DialContext instead of Dial and context.WithTimeout
+// instead.  Will be supported throughout 1.x.
 func WithTimeout(d time.Duration) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.timeout = d
@@ -341,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
 }
 
 func init() {
-	internal.WithResolverBuilder = withResolverBuilder
 	internal.WithHealthCheckFunc = withHealthCheckFunc
 }
 
@@ -455,6 +470,8 @@ func WithAuthority(a string) DialOption {
 // WithChannelzParentID returns a DialOption that specifies the channelz ID of
 // current ClientConn's parent. This function is used in nested channel creation
 // (e.g. grpclb dial).
+//
+// This API is EXPERIMENTAL.
 func WithChannelzParentID(id int64) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.channelzParentID = id
@@ -539,6 +556,7 @@ func defaultDialOptions() dialOptions {
 			WriteBufferSize: defaultWriteBufSize,
 			ReadBufferSize:  defaultReadBufSize,
 		},
+		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
 	}
 }
 
@@ -552,3 +570,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
 		o.minConnectTimeout = f
 	})
 }
+
+// withResolveNowBackoff specifies the function that clientconn uses to backoff
+// between successive calls to resolver.ResolveNow().
+//
+// For testing purpose only.
+func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolveNowBackoff = f
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register.  They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// This API is EXPERIMENTAL.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
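
WithConnectParams is the option this pull request relies on to configure connection backoff. A sketch that starts from backoff.DefaultConfig and overrides only the maximum delay; the target and the insecure option are placeholders:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and tighten the cap.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("example.com:50051",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}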

+ 4 - 0
vendor/google.golang.org/grpc/encoding/encoding.go

@@ -46,6 +46,10 @@ type Compressor interface {
 	// coding header.  The result must be static; the result cannot change
 	// between calls.
 	Name() string
+	// EXPERIMENTAL: if a Compressor implements
+	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
+	// to determine the size of the buffer allocated for the result of decompression.
+	// Return -1 to indicate unknown size.
 }
 
 var registeredCompressor = make(map[string]Compressor)
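
The comment added to the Compressor interface describes an optional DecompressedSize method. A hedged sketch of a gzip-backed compressor that opts out of buffer pre-sizing by returning -1; the "mygzip" name is illustrative, not a codec shipped with gRPC:

package mygzip

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

func (compressor) Name() string { return "mygzip" }

func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// Optional hook described above: -1 tells gRPC the decompressed size is unknown.
func (compressor) DecompressedSize(compressedBytes []byte) int { return -1 }

func init() {
	// Make the compressor selectable via grpc.UseCompressor("mygzip").
	encoding.RegisterCompressor(compressor{})
}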

+ 6 - 9
vendor/google.golang.org/grpc/go.mod

@@ -1,19 +1,16 @@
 module google.golang.org/grpc
 
+go 1.11
+
 require (
-	cloud.google.com/go v0.26.0 // indirect
-	github.com/BurntSushi/toml v0.3.1 // indirect
-	github.com/client9/misspell v0.3.4
+	github.com/envoyproxy/go-control-plane v0.9.4
+	github.com/envoyproxy/protoc-gen-validate v0.1.0
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/mock v1.1.1
-	github.com/golang/protobuf v1.2.0
+	github.com/golang/protobuf v1.3.3
 	github.com/google/go-cmp v0.2.0
-	golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
 	golang.org/x/net v0.0.0-20190311183353-d8887717615a
 	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
 	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
-	golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
-	google.golang.org/appengine v1.1.0 // indirect
-	google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
-	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
+	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
 )

+ 25 - 19
vendor/google.golang.org/grpc/grpclog/grpclog.go

@@ -26,72 +26,78 @@
 // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
 package grpclog // import "google.golang.org/grpc/grpclog"
 
-import "os"
+import (
+	"os"
 
-var logger = newLoggerV2()
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
 
 // V reports whether verbosity level l is at least the requested verbose level.
 func V(l int) bool {
-	return logger.V(l)
+	return grpclog.Logger.V(l)
 }
 
 // Info logs to the INFO log.
 func Info(args ...interface{}) {
-	logger.Info(args...)
+	grpclog.Logger.Info(args...)
 }
 
 // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
 func Infof(format string, args ...interface{}) {
-	logger.Infof(format, args...)
+	grpclog.Logger.Infof(format, args...)
 }
 
 // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
 func Infoln(args ...interface{}) {
-	logger.Infoln(args...)
+	grpclog.Logger.Infoln(args...)
 }
 
 // Warning logs to the WARNING log.
 func Warning(args ...interface{}) {
-	logger.Warning(args...)
+	grpclog.Logger.Warning(args...)
 }
 
 // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
 func Warningf(format string, args ...interface{}) {
-	logger.Warningf(format, args...)
+	grpclog.Logger.Warningf(format, args...)
 }
 
 // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
 func Warningln(args ...interface{}) {
-	logger.Warningln(args...)
+	grpclog.Logger.Warningln(args...)
 }
 
 // Error logs to the ERROR log.
 func Error(args ...interface{}) {
-	logger.Error(args...)
+	grpclog.Logger.Error(args...)
 }
 
 // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
 func Errorf(format string, args ...interface{}) {
-	logger.Errorf(format, args...)
+	grpclog.Logger.Errorf(format, args...)
 }
 
 // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
 func Errorln(args ...interface{}) {
-	logger.Errorln(args...)
+	grpclog.Logger.Errorln(args...)
 }
 
 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
 func Fatal(args ...interface{}) {
-	logger.Fatal(args...)
+	grpclog.Logger.Fatal(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
 
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
-// It calles os.Exit() with exit code 1.
+// It calls os.Exit() with exit code 1.
 func Fatalf(format string, args ...interface{}) {
-	logger.Fatalf(format, args...)
+	grpclog.Logger.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
@@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) {
 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
 // It calls os.Exit() with exit code 1.
 func Fatalln(args ...interface{}) {
-	logger.Fatalln(args...)
+	grpclog.Logger.Fatalln(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
@@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) {
 //
 // Deprecated: use Info.
 func Print(args ...interface{}) {
-	logger.Info(args...)
+	grpclog.Logger.Info(args...)
 }
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
 func Printf(format string, args ...interface{}) {
-	logger.Infof(format, args...)
+	grpclog.Logger.Infof(format, args...)
 }
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
 func Println(args ...interface{}) {
-	logger.Infoln(args...)
+	grpclog.Logger.Infoln(args...)
 }

+ 3 - 1
vendor/google.golang.org/grpc/grpclog/logger.go

@@ -18,6 +18,8 @@
 
 package grpclog
 
+import "google.golang.org/grpc/internal/grpclog"
+
 // Logger mimics golang's standard Logger as an interface.
 //
 // Deprecated: use LoggerV2.
@@ -35,7 +37,7 @@ type Logger interface {
 //
 // Deprecated: use SetLoggerV2.
 func SetLogger(l Logger) {
-	logger = &loggerWrapper{Logger: l}
+	grpclog.Logger = &loggerWrapper{Logger: l}
 }
 
 // loggerWrapper wraps Logger into a LoggerV2.

+ 20 - 1
vendor/google.golang.org/grpc/grpclog/loggerv2.go

@@ -24,6 +24,8 @@ import (
 	"log"
 	"os"
 	"strconv"
+
+	"google.golang.org/grpc/internal/grpclog"
 )
 
 // LoggerV2 does underlying logging work for grpclog.
@@ -65,7 +67,8 @@ type LoggerV2 interface {
 // SetLoggerV2 sets logger that is used in grpc to a V2 logger.
 // Not mutex-protected, should be called before any gRPC functions.
 func SetLoggerV2(l LoggerV2) {
-	logger = l
+	grpclog.Logger = l
+	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
 }
 
 const (
@@ -193,3 +196,19 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) {
 func (g *loggerT) V(l int) bool {
 	return l <= g.v
 }
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// This API is EXPERIMENTAL.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	FatalDepth(depth int, args ...interface{})
+}
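
SetLoggerV2 now also captures an optional DepthLoggerV2 implementation via the internal grpclog package. A minimal sketch that installs the stock writer-backed logger; the choice of writers is arbitrary:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Info and warnings to stdout, errors to stderr. Any LoggerV2 works here;
	// one that also implements DepthLoggerV2 is picked up automatically.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr))
}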

+ 21 - 21
vendor/google.golang.org/grpc/health/client.go

@@ -33,20 +33,20 @@ import (
 	"google.golang.org/grpc/status"
 )
 
-const maxDelay = 120 * time.Second
-
-var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
-var backoffFunc = func(ctx context.Context, retries int) bool {
-	d := backoffStrategy.Backoff(retries)
-	timer := time.NewTimer(d)
-	select {
-	case <-timer.C:
-		return true
-	case <-ctx.Done():
-		timer.Stop()
-		return false
+var (
+	backoffStrategy = backoff.DefaultExponential
+	backoffFunc     = func(ctx context.Context, retries int) bool {
+		d := backoffStrategy.Backoff(retries)
+		timer := time.NewTimer(d)
+		select {
+		case <-timer.C:
+			return true
+		case <-ctx.Done():
+			timer.Stop()
+			return false
+		}
 	}
-}
+)
 
 func init() {
 	internal.HealthCheckFunc = clientHealthCheck
@@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch"
 
 // This function implements the protocol defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
+func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error {
 	tryCnt := 0
 
 retryConnection:
@@ -70,7 +70,7 @@ retryConnection:
 		if ctx.Err() != nil {
 			return nil
 		}
-		setConnectivityState(connectivity.Connecting)
+		setConnectivityState(connectivity.Connecting, nil)
 		rawS, err := newStream(healthCheckMethod)
 		if err != nil {
 			continue retryConnection
@@ -79,7 +79,7 @@ retryConnection:
 		s, ok := rawS.(grpc.ClientStream)
 		// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
 		if !ok {
-			setConnectivityState(connectivity.Ready)
+			setConnectivityState(connectivity.Ready, nil)
 			return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
 		}
 
@@ -95,22 +95,22 @@ retryConnection:
 
 			// Reports healthy for the LBing purposes if health check is not implemented in the server.
 			if status.Code(err) == codes.Unimplemented {
-				setConnectivityState(connectivity.Ready)
+				setConnectivityState(connectivity.Ready, nil)
 				return err
 			}
 
 			// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
 			if err != nil {
-				setConnectivityState(connectivity.TransientFailure)
+				setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err))
 				continue retryConnection
 			}
 
-			// As a message has been received, removes the need for backoff for the next retry by reseting the try count.
+			// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
 			tryCnt = 0
 			if resp.Status == healthpb.HealthCheckResponse_SERVING {
-				setConnectivityState(connectivity.Ready)
+				setConnectivityState(connectivity.Ready, nil)
 			} else {
-				setConnectivityState(connectivity.TransientFailure)
+				setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status))
 			}
 		}
 	}
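
The health-check client above now reports the underlying error alongside the connectivity state. Enabling client-side health checking is unchanged; a hedged sketch, assuming the server registered its status under the default "" service name and a placeholder target:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // registers the client health-check function
)

func main() {
	conn, err := grpc.Dial("example.com:50051",
		grpc.WithInsecure(),
		// The empty serviceName matches the server's default registration.
		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}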

+ 60 - 44
vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go

@@ -1,15 +1,16 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // source: grpc/health/v1/health.proto
 
-package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+package grpc_health_v1
 
 import (
-	context "golang.org/x/net/context"
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
 	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -21,7 +22,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 type HealthCheckResponse_ServingStatus int32
 
@@ -38,6 +39,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{
 	2: "NOT_SERVING",
 	3: "SERVICE_UNKNOWN",
 }
+
 var HealthCheckResponse_ServingStatus_value = map[string]int32{
 	"UNKNOWN":         0,
 	"SERVING":         1,
@@ -48,8 +50,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{
 func (x HealthCheckResponse_ServingStatus) String() string {
 	return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
 }
+
 func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
+	return fileDescriptor_e265fd9d4e077217, []int{1, 0}
 }
 
 type HealthCheckRequest struct {
@@ -63,16 +66,17 @@ func (m *HealthCheckRequest) Reset()         { *m = HealthCheckRequest{} }
 func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckRequest) ProtoMessage()    {}
 func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
+	return fileDescriptor_e265fd9d4e077217, []int{0}
 }
+
 func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
 }
 func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
 }
-func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
+func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckRequest.Merge(m, src)
 }
 func (m *HealthCheckRequest) XXX_Size() int {
 	return xxx_messageInfo_HealthCheckRequest.Size(m)
@@ -101,16 +105,17 @@ func (m *HealthCheckResponse) Reset()         { *m = HealthCheckResponse{} }
 func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
 func (*HealthCheckResponse) ProtoMessage()    {}
 func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
+	return fileDescriptor_e265fd9d4e077217, []int{1}
 }
+
 func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
 }
 func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 	return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
 }
-func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
+func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthCheckResponse.Merge(m, src)
 }
 func (m *HealthCheckResponse) XXX_Size() int {
 	return xxx_messageInfo_HealthCheckResponse.Size(m)
@@ -129,18 +134,43 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
 }
 
 func init() {
+	proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
 	proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
 	proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
-	proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
+}
+
+func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
+
+var fileDescriptor_e265fd9d4e077217 = []byte{
+	// 297 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
+	0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
+	0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
+	0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
+	0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
+	0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
+	0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
+	0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
+	0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
+	0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
+	0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
+	0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
+	0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
+	0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
+	0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
+	0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
+	0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
+	0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
+	0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ context.Context
-var _ grpc.ClientConn
+var _ grpc.ClientConnInterface
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+const _ = grpc.SupportPackageIsVersion6
 
 // HealthClient is the client API for Health service.
 //
@@ -168,10 +198,10 @@ type HealthClient interface {
 }
 
 type healthClient struct {
-	cc *grpc.ClientConn
+	cc grpc.ClientConnInterface
 }
 
-func NewHealthClient(cc *grpc.ClientConn) HealthClient {
+func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
 	return &healthClient{cc}
 }
 
@@ -239,6 +269,17 @@ type HealthServer interface {
 	Watch(*HealthCheckRequest, Health_WatchServer) error
 }
 
+// UnimplementedHealthServer can be embedded to have forward compatible implementations.
+type UnimplementedHealthServer struct {
+}
+
+func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
 func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
 	s.RegisterService(&_Health_serviceDesc, srv)
 }
@@ -300,28 +341,3 @@ var _Health_serviceDesc = grpc.ServiceDesc{
 	},
 	Metadata: "grpc/health/v1/health.proto",
 }
-
-func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
-
-var fileDescriptor_health_6b1a06aa67f91efd = []byte{
-	// 297 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
-	0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
-	0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
-	0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
-	0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
-	0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
-	0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
-	0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
-	0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
-	0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
-	0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
-	0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
-	0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
-	0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
-	0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
-	0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
-	0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
-	0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
-	0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
-}
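
The regenerated file also adds UnimplementedHealthServer, which can be embedded so a partial implementation keeps compiling if new RPCs are added to the Health service. A small illustrative sketch (the checkOnlyHealth type is hypothetical, not part of this change):

package healthexample

import (
	"context"

	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// checkOnlyHealth overrides Check and inherits Watch from the embedded
// UnimplementedHealthServer, which returns codes.Unimplemented.
type checkOnlyHealth struct {
	healthpb.UnimplementedHealthServer
}

func (checkOnlyHealth) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

// Compile-time check that the pointer type satisfies the generated interface
// (Watch is promoted from the embedded struct's pointer receiver).
var _ healthpb.HealthServer = (*checkOnlyHealth)(nil)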

+ 5 - 5
vendor/google.golang.org/grpc/health/server.go

@@ -35,7 +35,7 @@ import (
 
 // Server implements `service Health`.
 type Server struct {
-	mu sync.Mutex
+	mu sync.RWMutex
 	// If shutdown is true, it's expected all serving status is NOT_SERVING, and
 	// will stay in NOT_SERVING.
 	shutdown bool
@@ -54,8 +54,8 @@ func NewServer() *Server {
 
 // Check implements `service Health`.
 func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
+	s.mu.RLock()
+	defer s.mu.RUnlock()
 	if servingStatus, ok := s.statusMap[in.Service]; ok {
 		return &healthpb.HealthCheckResponse{
 			Status: servingStatus,
@@ -139,7 +139,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H
 // Shutdown sets all serving status to NOT_SERVING, and configures the server to
 // ignore all future status changes.
 //
-// This changes serving status for all services. To set status for a perticular
+// This changes serving status for all services. To set status for a particular
 // services, call SetServingStatus().
 func (s *Server) Shutdown() {
 	s.mu.Lock()
@@ -153,7 +153,7 @@ func (s *Server) Shutdown() {
 // Resume sets all serving status to SERVING, and configures the server to
 // accept all future status changes.
 //
-// This changes serving status for all services. To set status for a perticular
+// This changes serving status for all services. To set status for a particular
 // services, call SetServingStatus().
 func (s *Server) Resume() {
 	s.mu.Lock()
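
With Check now taking only a read lock, frequent health polls no longer contend with status updates. A short sketch of the usual server-side wiring, using a hypothetical service name:

package healthexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// registerHealth attaches the stock health service and marks one service as
// serving; Shutdown()/Resume() can later flip every status at once.
func registerHealth(s *grpc.Server) *health.Server {
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)
	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING) // name is illustrative
	return hs
}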

+ 11 - 16
vendor/google.golang.org/grpc/internal/backoff/backoff.go

@@ -25,44 +25,39 @@ package backoff
 import (
 	"time"
 
+	grpcbackoff "google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/internal/grpcrand"
 )
 
 // Strategy defines the methodology for backing off after a grpc connection
 // failure.
-//
 type Strategy interface {
 	// Backoff returns the amount of time to wait before the next retry given
 	// the number of consecutive failures.
 	Backoff(retries int) time.Duration
 }
 
-const (
-	// baseDelay is the amount of time to wait before retrying after the first
-	// failure.
-	baseDelay = 1.0 * time.Second
-	// factor is applied to the backoff after each retry.
-	factor = 1.6
-	// jitter provides a range to randomize backoff delays.
-	jitter = 0.2
-)
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
 
 // Exponential implements exponential backoff algorithm as defined in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 type Exponential struct {
-	// MaxDelay is the upper bound of backoff delay.
-	MaxDelay time.Duration
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
 }
 
 // Backoff returns the amount of time to wait before the next retry given the
 // number of retries.
 func (bc Exponential) Backoff(retries int) time.Duration {
 	if retries == 0 {
-		return baseDelay
+		return bc.Config.BaseDelay
 	}
-	backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
 	for backoff < max && retries > 0 {
-		backoff *= factor
+		backoff *= bc.Config.Multiplier
 		retries--
 	}
 	if backoff > max {
@@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
 	}
 	// Randomize backoff delays so that if a cluster of requests start at
 	// the same time, they won't operate in lockstep.
-	backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
 	if backoff < 0 {
 		return 0
 	}
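
The internal strategy now simply wraps the exported google.golang.org/grpc/backoff configuration, which is what this change surfaces to clients. A hedged sketch of tuning it at dial time, assuming the grpc.ConnectParams/WithConnectParams API that ships with this vendored gRPC version; the concrete values are illustrative:

package client

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

// dialWithBackoff caps reconnect delays at 30s instead of the 120s default.
func dialWithBackoff(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second,
			},
			MinConnectTimeout: 10 * time.Second,
		}),
	)
}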

+ 6 - 6
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go

@@ -34,7 +34,7 @@ type Logger interface {
 }
 
 // binLogger is the global binary logger for the binary. One of this should be
-// built at init time from the configuration (environment varialbe or flags).
+// built at init time from the configuration (environment variable or flags).
 //
 // It is used to get a methodLogger for each individual method.
 var binLogger Logger
@@ -98,7 +98,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
 // New methodLogger with same service overrides the old one.
 func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
 	if _, ok := l.services[service]; ok {
-		return fmt.Errorf("conflicting rules for service %v found", service)
+		return fmt.Errorf("conflicting service rules for service %v found", service)
 	}
 	if l.services == nil {
 		l.services = make(map[string]*methodLoggerConfig)
@@ -112,10 +112,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig)
 // New methodLogger with same method overrides the old one.
 func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
 	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
 	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
 	if l.methods == nil {
 		l.methods = make(map[string]*methodLoggerConfig)
@@ -127,10 +127,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er
 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
 	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
 	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
 	if l.blacklist == nil {
 		l.blacklist = make(map[string]struct{})

+ 2 - 2
vendor/google.golang.org/grpc/internal/binarylog/env_config.go

@@ -43,7 +43,7 @@ import (
 //    Foo.
 //
 // If two configs exist for one certain method or service, the one specified
-// later overrides the privous config.
+// later overrides the previous config.
 func NewLoggerFromConfigString(s string) Logger {
 	if s == "" {
 		return nil
@@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
 		if m == "*" {
-			return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
 		}
 		if suffix != "" {
 			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")

+ 1 - 1
vendor/google.golang.org/grpc/internal/binarylog/sink.go

@@ -63,7 +63,7 @@ func (ns *noopSink) Close() error                 { return nil }
 
 // newWriterSink creates a binary log sink with the given writer.
 //
-// Write() marshalls the proto message and writes it to the given writer. Each
+// Write() marshals the proto message and writes it to the given writer. Each
 // message is prefixed with a 4 byte big endian unsigned integer as the length.
 //
 // No buffer is done, Close() doesn't try to close the writer.

+ 85 - 0
vendor/google.golang.org/grpc/internal/buffer/unbounded.go

@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import "sync"
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `interface{}`. This means that a call to Put() incurs an extra memory
+// allocation, and also that users need a type assertion while reading. For
+// performance critical code paths, using Unbounded is strongly discouraged and
+// defining a new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
+type Unbounded struct {
+	c       chan interface{}
+	mu      sync.Mutex
+	backlog []interface{}
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+	b.mu.Lock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
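
Because the package sits under internal/, it is only importable from within the gRPC module itself; the sketch below merely illustrates the Put/Get/Load contract described in the comments above.

package bufferexample // would need to live inside the gRPC module to import internal/buffer

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func drainThree() {
	b := buffer.NewUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(i) // never blocks; overflow goes to the backlog slice
	}
	for i := 0; i < 3; i++ {
		v := <-b.Get() // read one value
		b.Load()       // stage the next backlog entry, per the contract
		fmt.Println(v.(int))
	}
}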

+ 17 - 5
vendor/google.golang.org/grpc/internal/channelz/funcs.go

@@ -30,7 +30,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpclog"
 )
 
 const (
@@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
 // by pid). It returns the unique channelz tracking id assigned to this subchannel.
 func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.Error("a SubChannel's parent id cannot be 0")
+		grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
@@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 {
 // this listen socket.
 func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.Error("a ListenSocket's parent id cannot be 0")
+		grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
@@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
 // this normal socket.
 func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
 	if pid == 0 {
-		grpclog.Error("a NormalSocket's parent id cannot be 0")
+		grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
 		return 0
 	}
 	id := idGen.genID()
@@ -294,7 +294,19 @@ type TraceEventDesc struct {
 }
 
 // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, desc *TraceEventDesc) {
+func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+	for d := desc; d != nil; d = d.Parent {
+		switch d.Severity {
+		case CtUNKNOWN:
+			grpclog.InfoDepth(depth+1, d.Desc)
+		case CtINFO:
+			grpclog.InfoDepth(depth+1, d.Desc)
+		case CtWarning:
+			grpclog.WarningDepth(depth+1, d.Desc)
+		case CtError:
+			grpclog.ErrorDepth(depth+1, d.Desc)
+		}
+	}
 	if getMaxTraceEntry() == 0 {
 		return
 	}

+ 100 - 0
vendor/google.golang.org/grpc/internal/channelz/logging.go

@@ -0,0 +1,100 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// Info logs through grpclog.Info and adds a trace event if channelz is on.
+func Info(id int64, args ...interface{}) {
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     fmt.Sprint(args...),
+			Severity: CtINFO,
+		})
+	} else {
+		grpclog.InfoDepth(1, args...)
+	}
+}
+
+// Infof logs through grpclog.Infof and adds a trace event if channelz is on.
+func Infof(id int64, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     msg,
+			Severity: CtINFO,
+		})
+	} else {
+		grpclog.InfoDepth(1, msg)
+	}
+}
+
+// Warning logs through grpclog.Warning and adds a trace event if channelz is on.
+func Warning(id int64, args ...interface{}) {
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     fmt.Sprint(args...),
+			Severity: CtWarning,
+		})
+	} else {
+		grpclog.WarningDepth(1, args...)
+	}
+}
+
+// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on.
+func Warningf(id int64, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     msg,
+			Severity: CtWarning,
+		})
+	} else {
+		grpclog.WarningDepth(1, msg)
+	}
+}
+
+// Error logs through grpclog.Error and adds a trace event if channelz is on.
+func Error(id int64, args ...interface{}) {
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     fmt.Sprint(args...),
+			Severity: CtError,
+		})
+	} else {
+		grpclog.ErrorDepth(1, args...)
+	}
+}
+
+// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
+func Errorf(id int64, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     msg,
+			Severity: CtError,
+		})
+	} else {
+		grpclog.ErrorDepth(1, msg)
+	}
+}
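
These helpers are meant to replace direct grpclog calls inside gRPC so that a message also lands in the channelz trace of the affected entity when channelz is on. An illustrative call site (logLBSwitch and its arguments are hypothetical):

package grpc // illustrative: channelz is internal to the gRPC module

import "google.golang.org/grpc/internal/channelz"

// logLBSwitch logs through grpclog and, when channelz is enabled, records a
// CtINFO trace event against the channel identified by channelzID.
func logLBSwitch(channelzID int64, policy string) {
	channelz.Infof(channelzID, "Channel switches to new LB policy %q", policy)
}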

+ 5 - 2
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go

@@ -25,11 +25,14 @@ import (
 )
 
 const (
-	prefix   = "GRPC_GO_"
-	retryStr = prefix + "RETRY"
+	prefix          = "GRPC_GO_"
+	retryStr        = prefix + "RETRY"
+	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
 )
 
 var (
 	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
 	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
+	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
 )

+ 118 - 0
vendor/google.golang.org/grpc/internal/grpclog/grpclog.go

@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog (internal) defines depth logging for grpc.
+package grpclog
+
+// Logger is the logger used for the non-depth log functions.
+var Logger LoggerV2
+
+// DepthLogger is the logger used for the depth log functions.
+var DepthLogger DepthLoggerV2
+
+// InfoDepth logs to the INFO log at the specified depth.
+func InfoDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.InfoDepth(depth, args...)
+	} else {
+		Logger.Info(args...)
+	}
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+func WarningDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.WarningDepth(depth, args...)
+	} else {
+		Logger.Warning(args...)
+	}
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+func ErrorDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.ErrorDepth(depth, args...)
+	} else {
+		Logger.Error(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+func FatalDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.FatalDepth(depth, args...)
+	} else {
+		Logger.Fatal(args...)
+	}
+}
+
+// LoggerV2 does underlying logging work for grpclog.
+// This is a copy of the LoggerV2 defined in the external grpclog package. It
+// is defined here to avoid a circular dependency.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...interface{})
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...interface{})
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
+// It is defined here to avoid a circular dependency.
+//
+// This API is EXPERIMENTAL.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	FatalDepth(depth int, args ...interface{})
+}

+ 63 - 0
vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go

@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil PrefixLogger log without any prefix.
+type PrefixLogger struct {
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+	}
+	Logger.Infof(format, args...)
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+	}
+	Logger.Warningf(format, args...)
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+	}
+	Logger.Errorf(format, args...)
+}
+
+// Debugf does info logging at verbose level 2.
+func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
+	if Logger.V(2) {
+		pl.Infof(format, args...)
+	}
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(prefix string) *PrefixLogger {
+	return &PrefixLogger{prefix: prefix}
+}
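
PrefixLogger is how gRPC components stamp every line with the owning entity's identity. A hypothetical helper showing the intended pattern (the prefix format is illustrative):

package grpc // illustrative: internal/grpclog is only importable inside the gRPC module

import (
	"fmt"

	"google.golang.org/grpc/internal/grpclog"
)

// transportLogger returns a logger whose every message is prefixed with the
// connection's id, e.g. "[transport 7] ...".
func transportLogger(connID uint64) *grpclog.PrefixLogger {
	return grpclog.NewPrefixLogger(fmt.Sprintf("[transport %d] ", connID))
}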

+ 55 - 0
vendor/google.golang.org/grpc/internal/grpcutil/target.go

@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcutil provides a bunch of utility functions to be used across the
+// gRPC codebase.
+package grpcutil
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/resolver"
+)
+
+// split2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", "", false) instead.
+func split2(s, sep string) (string, string, bool) {
+	spl := strings.SplitN(s, sep, 2)
+	if len(spl) < 2 {
+		return "", "", false
+	}
+	return spl[0], spl[1], true
+}
+
+// ParseTarget splits target into a resolver.Target struct containing scheme,
+// authority and endpoint.
+//
+// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
+// target}.
+func ParseTarget(target string) (ret resolver.Target) {
+	var ok bool
+	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
+	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
+	return ret
+}
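
ParseTarget's fallback behaviour is easiest to see with a couple of inputs; the expected results below follow directly from the code above (grpcutil is internal, so this only compiles inside the gRPC module):

package grpc // illustrative placement inside the gRPC module

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func printTargets() {
	// Full scheme://authority/endpoint form.
	fmt.Printf("%+v\n", grpcutil.ParseTarget("dns://8.8.8.8/foo.example.com:443"))
	// -> {Scheme:dns Authority:8.8.8.8 Endpoint:foo.example.com:443}

	// No "://": everything becomes the endpoint.
	fmt.Printf("%+v\n", grpcutil.ParseTarget("localhost:50051"))
	// -> {Scheme: Authority: Endpoint:localhost:50051}
}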

+ 8 - 7
vendor/google.golang.org/grpc/internal/internal.go

@@ -28,9 +28,7 @@ import (
 )
 
 var (
-	// WithResolverBuilder is exported by dialoptions.go
-	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
-	// WithHealthCheckFunc is not exported by dialoptions.go
+	// WithHealthCheckFunc is set by dialoptions.go
 	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
@@ -39,14 +37,17 @@ var (
 	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
-	// ParseServiceConfig is a function to parse JSON service configs into
-	// opaque data structures.
-	ParseServiceConfig func(sc string) (interface{}, error)
 	// StatusRawProto is exported by status/status.go. This func returns a
 	// pointer to the wrapped Status proto for a given status.Status without a
 	// call to proto.Clone(). The returned Status proto should not be mutated by
 	// the caller.
 	StatusRawProto interface{} // func (*status.Status) *spb.Status
+	// NewRequestInfoContext creates a new context based on the argument context attaching
+	// the passed in RequestInfo to the new context.
+	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+	// ParseServiceConfigForTesting is for creating a fake
+	// ClientConn for resolver testing only
+	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
@@ -57,7 +58,7 @@ var (
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
 
 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.

+ 99 - 115
vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go → vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go

@@ -33,18 +33,22 @@ import (
 	"time"
 
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
 )
 
+// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+// addresses from SRV records.  Must not be changed after init time.
+var EnableSRVLookups = false
+
 func init() {
 	resolver.Register(NewBuilder())
 }
 
 const (
 	defaultPort       = "443"
-	defaultFreq       = time.Minute * 30
 	defaultDNSSvrPort = "53"
 	golang            = "GO"
 	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
@@ -94,47 +98,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
 
 // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
 func NewBuilder() resolver.Builder {
-	return &dnsBuilder{minFreq: defaultFreq}
+	return &dnsBuilder{}
 }
 
-type dnsBuilder struct {
-	// minimum frequency of polling the DNS server.
-	minFreq time.Duration
-}
+type dnsBuilder struct{}
 
 // Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	host, port, err := parseTarget(target.Endpoint, defaultPort)
 	if err != nil {
 		return nil, err
 	}
 
 	// IP address.
-	if net.ParseIP(host) != nil {
-		host, _ = formatIP(host)
-		addr := []resolver.Address{{Addr: host + ":" + port}}
-		i := &ipResolver{
-			cc: cc,
-			ip: addr,
-			rn: make(chan struct{}, 1),
-			q:  make(chan struct{}),
-		}
-		cc.NewAddress(addr)
-		go i.watcher()
-		return i, nil
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
 	}
 
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
 	d := &dnsResolver{
-		freq:                 b.minFreq,
-		backoff:              backoff.Exponential{MaxDelay: b.minFreq},
 		host:                 host,
 		port:                 port,
 		ctx:                  ctx,
 		cancel:               cancel,
 		cc:                   cc,
-		t:                    time.NewTimer(0),
 		rn:                   make(chan struct{}, 1),
 		disableServiceConfig: opts.DisableServiceConfig,
 	}
@@ -150,6 +140,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 
 	d.wg.Add(1)
 	go d.watcher()
+	d.ResolveNow(resolver.ResolveNowOptions{})
 	return d, nil
 }
 
@@ -164,53 +155,23 @@ type netResolver interface {
 	LookupTXT(ctx context.Context, name string) (txts []string, err error)
 }
 
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
-	cc resolver.ClientConn
-	ip []resolver.Address
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
-	rn chan struct{}
-	q  chan struct{}
-}
-
-// ResolveNow resend the address it stores, no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
-	select {
-	case i.rn <- struct{}{}:
-	default:
-	}
-}
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
 
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
-	close(i.q)
-}
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
-func (i *ipResolver) watcher() {
-	for {
-		select {
-		case <-i.rn:
-			i.cc.NewAddress(i.ip)
-		case <-i.q:
-			return
-		}
-	}
-}
+func (deadResolver) Close() {}
 
 // dnsResolver watches for the name resolution update for a non-IP target.
 type dnsResolver struct {
-	freq       time.Duration
-	backoff    backoff.Exponential
-	retryCount int
-	host       string
-	port       string
-	resolver   netResolver
-	ctx        context.Context
-	cancel     context.CancelFunc
-	cc         resolver.ClientConn
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
 	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
 	rn chan struct{}
-	t  *time.Timer
 	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
 	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
 	// replace the real lookup functions with mocked ones to facilitate testing.
@@ -222,7 +183,7 @@ type dnsResolver struct {
 }
 
 // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 	select {
 	case d.rn <- struct{}{}:
 	default:
@@ -233,7 +194,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
 func (d *dnsResolver) Close() {
 	d.cancel()
 	d.wg.Wait()
-	d.t.Stop()
 }
 
 func (d *dnsResolver) watcher() {
@@ -242,27 +202,15 @@ func (d *dnsResolver) watcher() {
 		select {
 		case <-d.ctx.Done():
 			return
-		case <-d.t.C:
 		case <-d.rn:
-			if !d.t.Stop() {
-				// Before resetting a timer, it should be stopped to prevent racing with
-				// reads on it's channel.
-				<-d.t.C
-			}
 		}
 
-		result, sc := d.lookup()
-		// Next lookup should happen within an interval defined by d.freq. It may be
-		// more often due to exponential retry on empty address list.
-		if len(result) == 0 {
-			d.retryCount++
-			d.t.Reset(d.backoff.Backoff(d.retryCount))
+		state, err := d.lookup()
+		if err != nil {
+			d.cc.ReportError(err)
 		} else {
-			d.retryCount = 0
-			d.t.Reset(d.freq)
+			d.cc.UpdateState(*state)
 		}
-		d.cc.NewServiceConfig(sc)
-		d.cc.NewAddress(result)
 
 		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
 		// will be queued in d.rn.
@@ -276,37 +224,68 @@ func (d *dnsResolver) watcher() {
 	}
 }
 
-func (d *dnsResolver) lookupSRV() []resolver.Address {
+func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
 	var newAddrs []resolver.Address
 	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
 	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
 	}
 	for _, s := range srvs {
 		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
 		if err != nil {
-			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
+				continue
+			}
+			return nil, err
 		}
 		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
+			ip, ok := formatIP(a)
 			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-				continue
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
 			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
 		}
 	}
-	return newAddrs
+	return newAddrs, nil
+}
+
+var filterError = func(err error) error {
+	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff).  Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	return err
+}
+
+func handleDNSError(err error, lookupType string) error {
+	err = filterError(err)
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		grpclog.Infoln(err)
+	}
+	return err
 }
 
-func (d *dnsResolver) lookupTXT() string {
+func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
 	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
 	if err != nil {
-		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
-		return ""
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
 	}
 	var res string
 	for _, s := range ss {
@@ -315,40 +294,45 @@ func (d *dnsResolver) lookupTXT() string {
 
 	// TXT record must have "grpc_config=" attribute in order to be used as service config.
 	if !strings.HasPrefix(res, txtAttribute) {
-		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
-		return ""
+		grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service config.
+		return nil
 	}
-	return strings.TrimPrefix(res, txtAttribute)
+	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+	return d.cc.ParseServiceConfig(sc)
 }
 
-func (d *dnsResolver) lookupHost() []resolver.Address {
+func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
 	var newAddrs []resolver.Address
 	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
 	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
+		err = handleDNSError(err, "A")
+		return nil, err
 	}
 	for _, a := range addrs {
-		a, ok := formatIP(a)
+		ip, ok := formatIP(a)
 		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 		}
-		addr := a + ":" + d.port
+		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
 	}
-	return newAddrs
+	return newAddrs, nil
 }
 
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	newAddrs := d.lookupSRV()
-	// Support fallback to non-balancer address.
-	newAddrs = append(newAddrs, d.lookupHost()...)
-	if d.disableServiceConfig {
-		return newAddrs, ""
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+	srv, srvErr := d.lookupSRV()
+	addrs, hostErr := d.lookupHost()
+	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+		return nil, hostErr
+	}
+	state := &resolver.State{
+		Addresses: append(addrs, srv...),
+	}
+	if !d.disableServiceConfig {
+		state.ServiceConfig = d.lookupTXT()
 	}
-	sc := d.lookupTXT()
-	return newAddrs, canaryingSC(sc)
+	return state, nil
 }
 
 // formatIP returns ok = false if addr is not a valid textual representation of an IP address.
@@ -434,12 +418,12 @@ func canaryingSC(js string) string {
 	var rcs []rawChoice
 	err := json.Unmarshal([]byte(js), &rcs)
 	if err != nil {
-		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+		grpclog.Warningf("dns: error parsing service config json: %v", err)
 		return ""
 	}
 	cliHostname, err := os.Hostname()
 	if err != nil {
-		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+		grpclog.Warningf("dns: error getting client hostname: %v", err)
 		return ""
 	}
 	var sc string

+ 33 - 0
vendor/google.golang.org/grpc/internal/resolver/dns/go113.go

@@ -0,0 +1,33 @@
+// +build go1.13
+
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import "net"
+
+func init() {
+	filterError = func(err error) error {
+		if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
+			// The name does not exist; not an error.
+			return nil
+		}
+		return err
+	}
+}

+ 2 - 2
vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go → vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go

@@ -26,7 +26,7 @@ const scheme = "passthrough"
 
 type passthroughBuilder struct{}
 
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	r := &passthroughResolver{
 		target: target,
 		cc:     cc,
@@ -48,7 +48,7 @@ func (r *passthroughResolver) start() {
 	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
 }
 
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
 
 func (*passthroughResolver) Close() {}
 

+ 4 - 8
vendor/google.golang.org/grpc/internal/transport/controlbuf.go

@@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false }
 type headerFrame struct {
 	streamID   uint32
 	hf         []hpack.HeaderField
-	endStream  bool                       // Valid on server side.
-	initStream func(uint32) (bool, error) // Used only on the client side.
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
 	onWrite    func()
 	wq         *writeQuota    // write quota for the stream created.
 	cleanup    *cleanupStream // Valid on the server side.
@@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
 
 func (l *loopyWriter) originateStream(str *outStream) error {
 	hdr := str.itl.dequeue().(*headerFrame)
-	sendPing, err := hdr.initStream(str.id)
-	if err != nil {
+	if err := hdr.initStream(str.id); err != nil {
 		if err == ErrConnClosing {
 			return err
 		}
 		// Other errors(errStreamDrain) need not close transport.
 		return nil
 	}
-	if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
 		return err
 	}
 	l.estdStreams[str.id] = str
-	if sendPing {
-		return l.pingHandler(&ping{data: [8]byte{}})
-	}
 	return nil
 }
 

+ 7 - 3
vendor/google.golang.org/grpc/internal/transport/handler_server.go

@@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 
 	if err == nil { // transport has not been closed
 		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+				Trailer: s.trailer.Copy(),
+			})
 		}
 	}
 	ht.Close()
@@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 
 	if err == nil {
 		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+				Header: md.Copy(),
+			})
 		}
 	}
 	return err
@@ -334,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 		Addr: ht.RemoteAddr(),
 	}
 	if req.TLS != nil {
-		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
+		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
 	}
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	s.ctx = peer.NewContext(ctx, pr)

+ 128 - 80
vendor/google.golang.org/grpc/internal/transport/http2_client.go

@@ -35,6 +35,7 @@ import (
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/syscall"
 	"google.golang.org/grpc/keepalive"
@@ -44,8 +45,14 @@ import (
 	"google.golang.org/grpc/status"
 )
 
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
 // http2Client implements the ClientTransport interface with HTTP2.
 type http2Client struct {
+	lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
 	ctx        context.Context
 	cancel     context.CancelFunc
 	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
@@ -62,8 +69,6 @@ type http2Client struct {
 	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
 	// that the server sent GoAway on this transport.
 	goAway chan struct{}
-	// awakenKeepalive is used to wake up keepalive when after it has gone dormant.
-	awakenKeepalive chan struct{}
 
 	framer *framer
 	// controlBuf delivers all the control related tasks (e.g., window
@@ -77,9 +82,6 @@ type http2Client struct {
 
 	perRPCCreds []credentials.PerRPCCredentials
 
-	// Boolean to keep track of reading activity on transport.
-	// 1 is true and 0 is false.
-	activity         uint32 // Accessed atomically.
 	kp               keepalive.ClientParameters
 	keepaliveEnabled bool
 
@@ -110,6 +112,16 @@ type http2Client struct {
 	// goAwayReason records the http2.ErrCode and debug data received with the
 	// GoAway frame.
 	goAwayReason GoAwayReason
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
 
 	// Fields below are for channelz metric collection.
 	channelzID int64 // channelz unique identification number
@@ -119,6 +131,8 @@ type http2Client struct {
 	onClose  func()
 
 	bufferPool *bufferPool
+
+	connectionID uint64
 }
 
 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 		readerDone:            make(chan struct{}),
 		writerDone:            make(chan struct{}),
 		goAway:                make(chan struct{}),
-		awakenKeepalive:       make(chan struct{}, 1),
 		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
@@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 			updateFlowControl: t.updateFlowControl,
 		}
 	}
-	// Make sure awakenKeepalive can't be written upon.
-	// keepalive routine will make it writable, if need be.
-	t.awakenKeepalive <- struct{}{}
 	if t.statsHandler != nil {
 		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
 			RemoteAddr: t.remoteAddr,
@@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
 	}
 	if t.keepaliveEnabled {
+		t.kpDormancyCond = sync.NewCond(&t.mu)
 		go t.keepalive()
 	}
 	// Start the reader goroutine for incoming message. Each transport has
@@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 		}
 	}
 
+	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
 	if err := t.framer.writer.Flush(); err != nil {
 		return nil, err
 	}
@@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
 	s := &Stream{
+		ct:             t,
 		done:           make(chan struct{}),
 		method:         callHdr.Method,
 		sendCompress:   callHdr.SendCompress,
@@ -380,23 +394,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 }
 
 func (t *http2Client) getPeer() *peer.Peer {
-	pr := &peer.Peer{
-		Addr: t.remoteAddr,
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo,
 	}
-	// Attach Auth info if there is any.
-	if t.authInfo != nil {
-		pr.AuthInfo = t.authInfo
-	}
-	return pr
 }
 
 func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
 	aud := t.createAudience(callHdr)
-	authData, err := t.getTrAuthData(ctx, aud)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
 	if err != nil {
 		return nil, err
 	}
-	callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
 	if err != nil {
 		return nil, err
 	}
@@ -419,6 +434,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 
 	if callHdr.SendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
 	}
 	if dl, ok := ctx.Deadline(); ok {
 		// Send out timeout regardless its value. The server can detect timeout context by itself.
@@ -564,7 +580,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 	hdr := &headerFrame{
 		hf:        headerFields,
 		endStream: false,
-		initStream: func(id uint32) (bool, error) {
+		initStream: func(id uint32) error {
 			t.mu.Lock()
 			if state := t.state; state != reachable {
 				t.mu.Unlock()
@@ -574,29 +590,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 					err = ErrConnClosing
 				}
 				cleanup(err)
-				return false, err
+				return err
 			}
 			t.activeStreams[id] = s
 			if channelz.IsOn() {
 				atomic.AddInt64(&t.czData.streamsStarted, 1)
 				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
 			}
-			var sendPing bool
-			// If the number of active streams change from 0 to 1, then check if keepalive
-			// has gone dormant. If so, wake it up.
-			if len(t.activeStreams) == 1 && t.keepaliveEnabled {
-				select {
-				case t.awakenKeepalive <- struct{}{}:
-					sendPing = true
-					// Fill the awakenKeepalive channel again as this channel must be
-					// kept non-writable except at the point that the keepalive()
-					// goroutine is waiting either to be awaken or shutdown.
-					t.awakenKeepalive <- struct{}{}
-				default:
-				}
+			// If the keepalive goroutine has gone dormant, wake it up.
+			if t.kpDormant {
+				t.kpDormancyCond.Signal()
 			}
 			t.mu.Unlock()
-			return sendPing, nil
+			return nil
 		},
 		onOrphaned: cleanup,
 		wq:         s.wq,
@@ -674,12 +680,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 		}
 	}
 	if t.statsHandler != nil {
+		header, ok := metadata.FromOutgoingContext(ctx)
+		if ok {
+			header.Set("user-agent", t.userAgent)
+		} else {
+			header = metadata.Pairs("user-agent", t.userAgent)
+		}
 		outHeader := &stats.OutHeader{
 			Client:      true,
 			FullMethod:  callHdr.Method,
 			RemoteAddr:  t.remoteAddr,
 			LocalAddr:   t.localAddr,
 			Compression: callHdr.SendCompress,
+			Header:      header,
 		}
 		t.statsHandler.HandleRPC(s.ctx, outHeader)
 	}
@@ -778,6 +791,11 @@ func (t *http2Client) Close() error {
 	t.state = closing
 	streams := t.activeStreams
 	t.activeStreams = nil
+	if t.kpDormant {
+		// If the keepalive goroutine is blocked on this condition variable, we
+		// should unblock it so that the goroutine eventually exits.
+		t.kpDormancyCond.Signal()
+	}
 	t.mu.Unlock()
 	t.controlBuf.finish()
 	t.cancel()
@@ -853,11 +871,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	return t.controlBuf.put(df)
 }
 
-func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Client) getStream(f http2.Frame) *Stream {
 	t.mu.Lock()
-	defer t.mu.Unlock()
-	s, ok := t.activeStreams[f.Header().StreamID]
-	return s, ok
+	s := t.activeStreams[f.Header().StreamID]
+	t.mu.Unlock()
+	return s
 }
 
 // adjustWindow sends out extra window update over the initial window size
@@ -937,8 +955,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
 		t.controlBuf.put(bdpPing)
 	}
 	// Select the right stream to dispatch.
-	s, ok := t.getStream(f)
-	if !ok {
+	s := t.getStream(f)
+	if s == nil {
 		return
 	}
 	if size > 0 {
@@ -969,8 +987,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
 }
 
 func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
-	s, ok := t.getStream(f)
-	if !ok {
+	s := t.getStream(f)
+	if s == nil {
 		return
 	}
 	if f.ErrCode == http2.ErrCodeRefusedStream {
@@ -1147,8 +1165,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
 
 // operateHeaders takes action on the decoded headers.
 func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
-	s, ok := t.getStream(frame)
-	if !ok {
+	s := t.getStream(frame)
+	if s == nil {
 		return
 	}
 	endStream := frame.StreamEnded()
@@ -1177,12 +1195,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 				inHeader := &stats.InHeader{
 					Client:     true,
 					WireLength: int(frame.Header().Length),
+					Header:     s.header.Copy(),
 				}
 				t.statsHandler.HandleRPC(s.ctx, inHeader)
 			} else {
 				inTrailer := &stats.InTrailer{
 					Client:     true,
 					WireLength: int(frame.Header().Length),
+					Trailer:    s.trailer.Copy(),
 				}
 				t.statsHandler.HandleRPC(s.ctx, inTrailer)
 			}
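
The hunks above start attaching the actual metadata to the stats events: OutHeader.Header on the send path (with the user-agent injected), and InHeader.Header / InTrailer.Trailer on the receive path. Below is a minimal sketch of a stats handler that reads these new fields; the handler type, log output, and target address are illustrative only, not part of this change.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// mdLogger is a hypothetical stats.Handler that logs the metadata now carried
// on the header and trailer stats events.
type mdLogger struct{}

func (mdLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (mdLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (mdLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (mdLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.OutHeader:
		log.Printf("sent headers: %v", ev.Header) // includes the user-agent set above
	case *stats.InHeader:
		log.Printf("received headers: %v", ev.Header)
	case *stats.InTrailer:
		log.Printf("received trailers: %v", ev.Trailer)
	}
}

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithStatsHandler(mdLogger{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
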
@@ -1191,6 +1211,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 
 	// If headerChan hasn't been closed yet
 	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.headerValid = true
 		if !endStream {
 			// HEADERS frame block carries a Response-Headers.
 			isHeader = true
@@ -1233,7 +1254,7 @@ func (t *http2Client) reader() {
 	}
 	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
 	if t.keepaliveEnabled {
-		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 	}
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
@@ -1248,7 +1269,7 @@ func (t *http2Client) reader() {
 		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
 		if t.keepaliveEnabled {
-			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		}
 		if err != nil {
 			// Abort an active stream if the http2.Framer returns a
@@ -1292,56 +1313,83 @@ func (t *http2Client) reader() {
 	}
 }
 
+func minTime(a, b time.Duration) time.Duration {
+	if a < b {
+		return a
+	}
+	return b
+}
+
 // keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
 	p := &ping{data: [8]byte{}}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	timeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
 	timer := time.NewTimer(t.kp.Time)
 	for {
 		select {
 		case <-timer.C:
-			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-				timer.Reset(t.kp.Time)
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were here.
+				outstandingPing = false
+				// Next timer should fire at kp.Time seconds from lastRead time.
+				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
 				continue
 			}
-			// Check if keepalive should go dormant.
+			if outstandingPing && timeoutLeft <= 0 {
+				t.Close()
+				return
+			}
 			t.mu.Lock()
-			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
-				// Make awakenKeepalive writable.
-				<-t.awakenKeepalive
-				t.mu.Unlock()
-				select {
-				case <-t.awakenKeepalive:
-					// If the control gets here a ping has been sent
-					// need to reset the timer with keepalive.Timeout.
-				case <-t.ctx.Done():
-					return
-				}
-			} else {
+			if t.state == closing {
+				// If the transport is closing, we should exit from the
+				// keepalive goroutine here. If not, we could have a race
+				// between the call to Signal() from Close() and the call to
+				// Wait() here, whereby the keepalive goroutine ends up
+				// blocking on the condition variable which will never be
+				// signalled again.
 				t.mu.Unlock()
+				return
+			}
+			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+				// If a ping was sent out previously (because there were active
+				// streams at that point) which wasn't acked and its timeout
+				// hadn't fired, but we got here and are about to go dormant,
+				// we should make sure that we unconditionally send a ping once
+				// we awaken.
+				outstandingPing = false
+				t.kpDormant = true
+				t.kpDormancyCond.Wait()
+			}
+			t.kpDormant = false
+			t.mu.Unlock()
+
+			// We get here either because we were dormant and a new stream was
+			// created which unblocked the Wait() call, or because the
+			// keepalive timer expired. In both cases, we need to send a ping.
+			if !outstandingPing {
 				if channelz.IsOn() {
 					atomic.AddInt64(&t.czData.kpCount, 1)
 				}
-				// Send ping.
 				t.controlBuf.put(p)
+				timeoutLeft = t.kp.Timeout
+				outstandingPing = true
 			}
-
-			// By the time control gets here a ping has been sent one way or the other.
-			timer.Reset(t.kp.Timeout)
-			select {
-			case <-timer.C:
-				if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-					timer.Reset(t.kp.Time)
-					continue
-				}
-				infof("transport: closing client transport due to idleness.")
-				t.Close()
-				return
-			case <-t.ctx.Done():
-				if !timer.Stop() {
-					<-timer.C
-				}
-				return
-			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, timeoutLeft)
+			timeoutLeft -= sleepDuration
+			timer.Reset(sleepDuration)
 		case <-t.ctx.Done():
 			if !timer.Stop() {
 				<-timer.C
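
The reworked loop above is driven entirely by the client keepalive parameters: kp.Time controls how long after the last read a ping is sent, kp.Timeout bounds the wait for a ping ack, and PermitWithoutStream decides whether the goroutine goes dormant when there are no active streams. A minimal dial-side usage sketch, not part of this change; the address and durations are placeholders.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // kp.Time: ping after 30s without reads
			Timeout:             10 * time.Second, // kp.Timeout: close if the ping is not acked in time
			PermitWithoutStream: false,            // false: keepalive goes dormant with no active streams
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
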

+ 91 - 61
vendor/google.golang.org/grpc/internal/transport/http2_server.go

@@ -62,11 +62,15 @@ var (
 	statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
 )
 
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
+	lastRead    int64 // Keep this field 64-bit aligned. Accessed atomically.
 	ctx         context.Context
-	ctxDone     <-chan struct{} // Cache the context.Done() chan
-	cancel      context.CancelFunc
+	done        chan struct{}
 	conn        net.Conn
 	loopy       *loopyWriter
 	readerDone  chan struct{} // sync point to enable testing.
@@ -84,12 +88,8 @@ type http2Server struct {
 	controlBuf *controlBuffer
 	fc         *trInFlow
 	stats      stats.Handler
-	// Flag to keep track of reading activity on transport.
-	// 1 is true and 0 is false.
-	activity uint32 // Accessed atomically.
 	// Keepalive and max-age parameters for the server.
 	kp keepalive.ServerParameters
-
 	// Keepalive enforcement policy.
 	kep keepalive.EnforcementPolicy
 	// The time instance last ping was received.
@@ -125,6 +125,8 @@ type http2Server struct {
 	channelzID int64 // channelz unique identification number
 	czData     *channelzData
 	bufferPool *bufferPool
+
+	connectionID uint64
 }
 
 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@@ -175,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 			Val: *config.MaxHeaderListSize,
 		})
 	}
+	if config.HeaderTableSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingHeaderTableSize,
+			Val: *config.HeaderTableSize,
+		})
+	}
 	if err := framer.fr.WriteSettings(isettings...); err != nil {
 		return nil, connectionErrorf(false, err, "transport: %v", err)
 	}
@@ -206,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
-	ctx, cancel := context.WithCancel(context.Background())
+	done := make(chan struct{})
 	t := &http2Server{
-		ctx:               ctx,
-		cancel:            cancel,
-		ctxDone:           ctx.Done(),
+		ctx:               context.Background(),
+		done:              done,
 		conn:              conn,
 		remoteAddr:        conn.RemoteAddr(),
 		localAddr:         conn.LocalAddr(),
@@ -231,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 		czData:            new(channelzData),
 		bufferPool:        newBufferPool(),
 	}
-	t.controlBuf = newControlBuffer(t.ctxDone)
+	t.controlBuf = newControlBuffer(t.done)
 	if dynamicWindow {
 		t.bdpEst = &bdpEstimator{
 			bdp:               initialWindowSize,
@@ -249,6 +256,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	if channelz.IsOn() {
 		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
 	}
+
+	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
+
 	t.framer.writer.Flush()
 
 	defer func() {
@@ -273,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 	if err != nil {
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
 	}
-	atomic.StoreUint32(&t.activity, 1)
+	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
 		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -362,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 				rstCode:  http2.ErrCodeRefusedStream,
 				onWrite:  func() {},
 			})
+			s.cancel()
 			return false
 		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
+		s.cancel()
 		return false
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
@@ -378,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			rstCode:  http2.ErrCodeRefusedStream,
 			onWrite:  func() {},
 		})
+		s.cancel()
 		return false
 	}
 	if streamID%2 != 1 || streamID <= t.maxStreamID {
 		t.mu.Unlock()
 		// illegal gRPC stream id.
 		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		s.cancel()
 		return true
 	}
 	t.maxStreamID = streamID
@@ -408,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			LocalAddr:   t.localAddr,
 			Compression: s.recvCompress,
 			WireLength:  int(frame.Header().Length),
+			Header:      metadata.MD(state.data.mdata).Copy(),
 		}
 		t.stats.HandleRPC(s.ctx, inHeader)
 	}
@@ -441,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
 	for {
 		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
-		atomic.StoreUint32(&t.activity, 1)
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		if err != nil {
 			if se, ok := err.(http2.StreamError); ok {
 				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
@@ -749,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
 	return true
 }
 
-// WriteHeader sends the header metedata md back to the client.
+// WriteHeader sends the header metadata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	if s.updateHeaderSent() || s.getState() == streamDone {
 		return ErrIllegalHeaderWrite
@@ -800,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	if t.stats != nil {
 		// Note: WireLength is not set in outHeader.
 		// TODO(mmukhi): Revisit this later, if needed.
-		outHeader := &stats.OutHeader{}
+		outHeader := &stats.OutHeader{
+			Header: s.header.Copy(),
+		}
 		t.stats.HandleRPC(s.Context(), outHeader)
 	}
 	return nil
@@ -863,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 	rst := s.getState() == streamActive
 	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
 	if t.stats != nil {
-		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
 	}
 	return nil
 }
@@ -885,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
 			s.cancel()
 			select {
-			case <-t.ctx.Done():
+			case <-t.done:
 				return ErrConnClosing
 			default:
 			}
@@ -907,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 	}
 	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
 		select {
-		case <-t.ctx.Done():
+		case <-t.done:
 			return ErrConnClosing
 		default:
 		}
@@ -924,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
 // after an additional duration of keepalive.Timeout.
 func (t *http2Server) keepalive() {
 	p := &ping{}
-	var pingSent bool
-	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
-	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
-	keepalive := time.NewTimer(t.kp.Time)
-	// NOTE: All exit paths of this function should reset their
-	// respective timers. A failure to do so will cause the
-	// following clean-up to deadlock and eventually leak.
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
 	defer func() {
-		if !maxIdle.Stop() {
-			<-maxIdle.C
-		}
-		if !maxAge.Stop() {
-			<-maxAge.C
-		}
-		if !keepalive.Stop() {
-			<-keepalive.C
-		}
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
 	}()
+
 	for {
 		select {
-		case <-maxIdle.C:
+		case <-idleTimer.C:
 			t.mu.Lock()
 			idle := t.idle
 			if idle.IsZero() { // The connection is non-idle.
 				t.mu.Unlock()
-				maxIdle.Reset(t.kp.MaxConnectionIdle)
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
 				continue
 			}
 			val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -958,44 +980,52 @@ func (t *http2Server) keepalive() {
 				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
 				// Gracefully close the connection.
 				t.drain(http2.ErrCodeNo, []byte{})
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxIdle.Reset(infinity)
 				return
 			}
-			maxIdle.Reset(val)
-		case <-maxAge.C:
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
 			t.drain(http2.ErrCodeNo, []byte{})
-			maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
 			select {
-			case <-maxAge.C:
+			case <-ageTimer.C:
 				// Close the connection after grace period.
 				infof("transport: closing server transport due to maximum connection age.")
 				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxAge.Reset(infinity)
-			case <-t.ctx.Done():
+			case <-t.done:
 			}
 			return
-		case <-keepalive.C:
-			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-				pingSent = false
-				keepalive.Reset(t.kp.Time)
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Setup the timer to fire at kp.Time seconds from
+				// lastRead time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
 				continue
 			}
-			if pingSent {
+			if outstandingPing && kpTimeoutLeft <= 0 {
 				infof("transport: closing server transport due to idleness.")
 				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				keepalive.Reset(infinity)
 				return
 			}
-			pingSent = true
-			if channelz.IsOn() {
-				atomic.AddInt64(&t.czData.kpCount, 1)
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
 			}
-			t.controlBuf.put(p)
-			keepalive.Reset(t.kp.Timeout)
-		case <-t.ctx.Done():
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
+		case <-t.done:
 			return
 		}
 	}
@@ -1015,7 +1045,7 @@ func (t *http2Server) Close() error {
 	t.activeStreams = nil
 	t.mu.Unlock()
 	t.controlBuf.finish()
-	t.cancel()
+	close(t.done)
 	err := t.conn.Close()
 	if channelz.IsOn() {
 		channelz.RemoveEntry(t.channelzID)
@@ -1155,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
 		select {
 		case <-t.drainChan:
 		case <-timer.C:
-		case <-t.ctx.Done():
+		case <-t.done:
 			return
 		}
 		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
@@ -1205,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
 	select {
 	case sz := <-resp:
 		return int64(sz)
-	case <-t.ctxDone:
+	case <-t.done:
 		return -1
 	case <-timer.C:
 		return -2
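
The server-side loop mirrors the client logic and is configured through keepalive.ServerParameters plus an enforcement policy. A hedged configuration sketch with illustrative values and a placeholder listen address:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,  // drives the idleTimer branch above
			MaxConnectionAge:  30 * time.Minute, // drives the ageTimer branch
			Time:              2 * time.Minute,  // kp.Time: ping after this much read inactivity
			Timeout:           20 * time.Second, // kp.Timeout: close if the ping is not acked
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute, // reject clients that ping more often than this
			PermitWithoutStream: false,
		}),
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
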

+ 31 - 39
vendor/google.golang.org/grpc/internal/transport/transport.go

@@ -73,10 +73,11 @@ type recvMsg struct {
 }
 
 // recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
 type recvBuffer struct {
 	c       chan recvMsg
 	mu      sync.Mutex
@@ -233,6 +234,7 @@ const (
 type Stream struct {
 	id           uint32
 	st           ServerTransport    // nil for client side Stream
+	ct           *http2Client       // nil for server side Stream
 	ctx          context.Context    // the associated context of the stream
 	cancel       context.CancelFunc // always nil for client side Stream
 	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
@@ -251,6 +253,10 @@ type Stream struct {
 
 	headerChan       chan struct{} // closed to indicate the end of header metadata.
 	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).  Not valid on server side.
+	headerValid bool
 
 	// hdrMu protects header and trailer metadata on the server-side.
 	hdrMu sync.Mutex
@@ -303,34 +309,28 @@ func (s *Stream) getState() streamState {
 	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }
 
-func (s *Stream) waitOnHeader() error {
+func (s *Stream) waitOnHeader() {
 	if s.headerChan == nil {
 		// On the server headerChan is always nil since a stream originates
 		// only after having received headers.
-		return nil
+		return
 	}
 	select {
 	case <-s.ctx.Done():
-		// We prefer success over failure when reading messages because we delay
-		// context error in stream.Read(). To keep behavior consistent, we also
-		// prefer success here.
-		select {
-		case <-s.headerChan:
-			return nil
-		default:
-		}
-		return ContextErr(s.ctx.Err())
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
 	case <-s.headerChan:
-		return nil
 	}
 }
 
 // RecvCompress returns the compression algorithm applied to the inbound
 // message. It is empty string if there is no compression applied.
 func (s *Stream) RecvCompress() string {
-	if err := s.waitOnHeader(); err != nil {
-		return ""
-	}
+	s.waitOnHeader()
 	return s.recvCompress
 }
 
@@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} {
 // available. It blocks until i) the metadata is ready or ii) there is no header
 // metadata or iii) the stream is canceled/expired.
 //
-// On server side, it returns the out header after t.WriteHeader is called.
+// On server side, it returns the out header after t.WriteHeader is called.  It
+// does not block and must not be called until after WriteHeader.
 func (s *Stream) Header() (metadata.MD, error) {
-	if s.headerChan == nil && s.header != nil {
+	if s.headerChan == nil {
 		// On server side, return the header in stream. It will be the out
 		// header after t.WriteHeader is called.
 		return s.header.Copy(), nil
 	}
-	err := s.waitOnHeader()
-	// Even if the stream is closed, header is returned if available.
-	select {
-	case <-s.headerChan:
-		if s.header == nil {
-			return nil, nil
-		}
-		return s.header.Copy(), nil
-	default:
+	s.waitOnHeader()
+	if !s.headerValid {
+		return nil, s.status.Err()
 	}
-	return nil, err
+	return s.header.Copy(), nil
 }
 
 // TrailersOnly blocks until a header or trailers-only frame is received and
 // then returns true if the stream was trailers-only.  If the stream ends
-// before headers are received, returns true, nil.  If a context error happens
-// first, returns it as a status error.  Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
-	err := s.waitOnHeader()
-	if err != nil {
-		return false, err
-	}
-	return s.noHeaders, nil
+// before headers are received, returns true, nil.  Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
 }
 
 // Trailer returns the cached trailer metadata. Note that if it is not called
@@ -534,6 +525,7 @@ type ServerConfig struct {
 	ReadBufferSize        int
 	ChannelzParentID      int64
 	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
 }
 
 // NewServerTransport creates a ServerTransport with conn or non-nil error
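
At the public API level the new semantics surface through grpc.ClientStream: Header() blocks until header metadata arrives, the stream ends, or the context expires, and it now reports the RPC status when no valid header was received. A small sketch of that contract; the function name is illustrative and the stream would come from a generated streaming client, which is not shown here.

package sketch

import (
	"log"

	"google.golang.org/grpc"
)

// logHeader demonstrates the client-side Header() contract described above.
func logHeader(stream grpc.ClientStream) {
	md, err := stream.Header()
	if err != nil {
		// No valid header: err carries the RPC status (e.g. the stream failed
		// or the context expired before headers arrived).
		log.Printf("no header: %v", err)
		return
	}
	log.Printf("header metadata: %v", md)
}
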

+ 102 - 70
vendor/google.golang.org/grpc/picker_wrapper.go

@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"sync"
 
@@ -31,49 +32,78 @@ import (
 	"google.golang.org/grpc/status"
 )
 
+// v2PickerWrapper wraps a balancer.Picker while providing the
+// balancer.V2Picker API.  It requires a pickerWrapper to generate errors
+// including the latest connectionError.  To be deleted when balancer.Picker is
+// updated to the balancer.V2Picker API.
+type v2PickerWrapper struct {
+	picker  balancer.Picker
+	connErr *connErr
+}
+
+func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	sc, done, err := v.picker.Pick(info.Ctx, info)
+	if err != nil {
+		if err == balancer.ErrTransientFailure {
+			return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
+		}
+		return balancer.PickResult{}, err
+	}
+	return balancer.PickResult{SubConn: sc, Done: done}, nil
+}
+
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblock when there's a picker update.
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
 	blockingCh chan struct{}
-	picker     balancer.Picker
+	picker     balancer.V2Picker
 
-	// The latest connection happened.
-	connErrMu sync.Mutex
-	connErr   error
+	// The latest connection error.  TODO: remove when V1 picker is deprecated;
+	// balancer should be responsible for providing the error.
+	*connErr
 }
 
-func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{blockingCh: make(chan struct{})}
-	return bp
+type connErr struct {
+	mu  sync.Mutex
+	err error
 }
 
-func (bp *pickerWrapper) updateConnectionError(err error) {
-	bp.connErrMu.Lock()
-	bp.connErr = err
-	bp.connErrMu.Unlock()
+func (c *connErr) updateConnectionError(err error) {
+	c.mu.Lock()
+	c.err = err
+	c.mu.Unlock()
 }
 
-func (bp *pickerWrapper) connectionError() error {
-	bp.connErrMu.Lock()
-	err := bp.connErr
-	bp.connErrMu.Unlock()
+func (c *connErr) connectionError() error {
+	c.mu.Lock()
+	err := c.err
+	c.mu.Unlock()
 	return err
 }
 
+func newPickerWrapper() *pickerWrapper {
+	return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
-	bp.mu.Lock()
-	if bp.done {
-		bp.mu.Unlock()
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
+}
+
+// updatePickerV2 is called by UpdateState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
+	pw.mu.Lock()
+	if pw.done {
+		pw.mu.Unlock()
 		return
 	}
-	bp.picker = p
-	// bp.blockingCh should never be nil.
-	close(bp.blockingCh)
-	bp.blockingCh = make(chan struct{})
-	bp.mu.Unlock()
+	pw.picker = p
+	// pw.blockingCh should never be nil.
+	close(pw.blockingCh)
+	pw.blockingCh = make(chan struct{})
+	pw.mu.Unlock()
 }
 
 func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
@@ -100,83 +130,85 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
 // - the current picker returns other errors and failfast is false.
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
 	var ch chan struct{}
 
+	var lastPickErr error
 	for {
-		bp.mu.Lock()
-		if bp.done {
-			bp.mu.Unlock()
+		pw.mu.Lock()
+		if pw.done {
+			pw.mu.Unlock()
 			return nil, nil, ErrClientConnClosing
 		}
 
-		if bp.picker == nil {
-			ch = bp.blockingCh
+		if pw.picker == nil {
+			ch = pw.blockingCh
 		}
-		if ch == bp.blockingCh {
+		if ch == pw.blockingCh {
 			// This could happen when either:
-			// - bp.picker is nil (the previous if condition), or
+			// - pw.picker is nil (the previous if condition), or
 			// - has called pick on the current picker.
-			bp.mu.Unlock()
+			pw.mu.Unlock()
 			select {
 			case <-ctx.Done():
-				if connectionErr := bp.connectionError(); connectionErr != nil {
-					switch ctx.Err() {
-					case context.DeadlineExceeded:
-						return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
-					case context.Canceled:
-						return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
-					}
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else if connectionErr := pw.connectionError(); connectionErr != nil {
+					errStr = "latest connection error: " + connectionErr.Error()
+				} else {
+					errStr = ctx.Err().Error()
+				}
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return nil, nil, status.Error(codes.Canceled, errStr)
 				}
-				return nil, nil, ctx.Err()
 			case <-ch:
 			}
 			continue
 		}
 
-		ch = bp.blockingCh
-		p := bp.picker
-		bp.mu.Unlock()
+		ch = pw.blockingCh
+		p := pw.picker
+		pw.mu.Unlock()
 
-		subConn, done, err := p.Pick(ctx, opts)
+		pickResult, err := p.Pick(info)
 
 		if err != nil {
-			switch err {
-			case balancer.ErrNoSubConnAvailable:
+			if err == balancer.ErrNoSubConnAvailable {
 				continue
-			case balancer.ErrTransientFailure:
+			}
+			if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
 				if !failfast {
+					lastPickErr = err
 					continue
 				}
-				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
-			case context.DeadlineExceeded:
-				return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
-			case context.Canceled:
-				return nil, nil, status.Error(codes.Canceled, err.Error())
-			default:
-				if _, ok := status.FromError(err); ok {
-					return nil, nil, err
-				}
-				// err is some other error.
-				return nil, nil, status.Error(codes.Unknown, err.Error())
+				return nil, nil, status.Error(codes.Unavailable, err.Error())
 			}
+			if _, ok := status.FromError(err); ok {
+				return nil, nil, err
+			}
+			// err is some other error.
+			return nil, nil, status.Error(codes.Unknown, err.Error())
 		}
 
-		acw, ok := subConn.(*acBalancerWrapper)
+		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
 			grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
 			if channelz.IsOn() {
-				return t, doneChannelzWrapper(acw, done), nil
+				return t, doneChannelzWrapper(acw, pickResult.Done), nil
 			}
-			return t, done, nil
+			return t, pickResult.Done, nil
 		}
-		if done != nil {
+		if pickResult.Done != nil {
 			// Calling done with nil error, no bytes sent and no bytes received.
 			// DoneInfo with default value works.
-			done(balancer.DoneInfo{})
+			pickResult.Done(balancer.DoneInfo{})
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
@@ -186,12 +218,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 	}
 }
 
-func (bp *pickerWrapper) close() {
-	bp.mu.Lock()
-	defer bp.mu.Unlock()
-	if bp.done {
+func (pw *pickerWrapper) close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
 		return
 	}
-	bp.done = true
-	close(bp.blockingCh)
+	pw.done = true
+	close(pw.blockingCh)
 }
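
v2PickerWrapper above adapts legacy pickers to the balancer.V2Picker shape that pickerWrapper now stores. For reference, a picker written directly against the new API looks roughly like the sketch below; the type name and error handling are illustrative, not part of this change.

package sketch

import (
	"google.golang.org/grpc/balancer"
)

// firstReadyPicker is a hypothetical V2 picker: Pick receives a PickInfo
// (method name and RPC context) and returns a PickResult instead of the old
// (SubConn, done, error) triple.
type firstReadyPicker struct {
	sc  balancer.SubConn // a READY SubConn handed over by the balancer
	err error            // set instead of sc when there is nothing to pick
}

func (p *firstReadyPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	if p.err != nil {
		// TransientFailureError marks the error so pickerWrapper.pick retries
		// it when failfast is disabled (see the IsTransientFailure check above).
		return balancer.PickResult{}, balancer.TransientFailureError(p.err)
	}
	return balancer.PickResult{SubConn: p.sc, Done: func(balancer.DoneInfo) {}}, nil
}
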

+ 65 - 24
vendor/google.golang.org/grpc/pickfirst.go

@@ -19,12 +19,14 @@
 package grpc
 
 import (
-	"context"
+	"errors"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
 // PickFirstBalancerName is the name of the pick_first balancer.
@@ -45,35 +47,67 @@ func (*pickfirstBuilder) Name() string {
 }
 
 type pickfirstBalancer struct {
-	cc balancer.ClientConn
-	sc balancer.SubConn
+	state connectivity.State
+	cc    balancer.ClientConn
+	sc    balancer.SubConn
 }
 
+var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
+
 func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
 	if err != nil {
-		if grpclog.V(2) {
-			grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
-		}
+		b.ResolverError(err)
 		return
 	}
+	b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
+}
+
+func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+	switch b.state {
+	case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
+		// Set a failing picker if we don't have a good picker.
+		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+			Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
+		)
+	}
+	if grpclog.V(2) {
+		grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
+	}
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
+	if len(cs.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
 	if b.sc == nil {
-		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
+		var err error
+		b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
 		if err != nil {
-			//TODO(yuxuanli): why not change the cc state to Idle?
 			if grpclog.V(2) {
 				grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
 			}
-			return
+			b.state = connectivity.TransientFailure
+			b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
+				Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
+			)
+			return balancer.ErrBadResolverState
 		}
-		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
+		b.state = connectivity.Idle
+		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
 		b.sc.Connect()
 	} else {
-		b.sc.UpdateAddresses(addrs)
+		b.sc.UpdateAddresses(cs.ResolverState.Addresses)
 		b.sc.Connect()
 	}
+	return nil
 }
 
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
 	if grpclog.V(2) {
 		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
 	}
@@ -83,18 +117,28 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn
 		}
 		return
 	}
-	if s == connectivity.Shutdown {
+	b.state = s.ConnectivityState
+	if s.ConnectivityState == connectivity.Shutdown {
 		b.sc = nil
 		return
 	}
 
-	switch s {
+	switch s.ConnectivityState {
 	case connectivity.Ready, connectivity.Idle:
-		b.cc.UpdateBalancerState(s, &picker{sc: sc})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
 	case connectivity.Connecting:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
+		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
 	case connectivity.TransientFailure:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
+		err := balancer.ErrTransientFailure
+		// TODO: this can be unconditional after the V1 API is removed, as
+		// SubConnState will always contain a connection error.
+		if s.ConnectionError != nil {
+			err = balancer.TransientFailureError(s.ConnectionError)
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: s.ConnectivityState,
+			Picker:            &picker{err: err},
+		})
 	}
 }
 
@@ -102,15 +146,12 @@ func (b *pickfirstBalancer) Close() {
 }
 
 type picker struct {
-	err error
-	sc  balancer.SubConn
+	result balancer.PickResult
+	err    error
 }
 
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if p.err != nil {
-		return nil, nil, p.err
-	}
-	return p.sc, nil, nil
+func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
 }
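
pick_first remains the default load-balancing policy, so the migration above changes no user-facing configuration. For completeness, selecting it explicitly can look like the sketch below; the address is a placeholder, and WithDefaultServiceConfig only applies when the resolver does not supply a service config.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
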
 
 func init() {

+ 75 - 15
vendor/google.golang.org/grpc/resolver/resolver.go

@@ -21,6 +21,11 @@
 package resolver
 
 import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -69,12 +74,18 @@ func GetDefaultScheme() string {
 }
 
 // AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
 type AddressType uint8
 
 const (
 	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
 	Backend AddressType = iota
 	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: use Attributes in Address instead.
 	GRPCLB
 )
 
@@ -83,33 +94,75 @@ const (
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
-	// Type is the type of this address.
-	Type AddressType
+
 	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
 	//
-	// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+	// If Type is GRPCLB, ServerName should be the name of the remote load
 	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
 	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
+	//
+	// Deprecated: use Attributes instead.
 	Metadata interface{}
 }
 
-// BuildOption includes additional information for the builder to create
+// BuildOptions includes additional information for the builder to create
 // the resolver.
-type BuildOption struct {
-	// DisableServiceConfig indicates whether resolver should fetch service config data.
+type BuildOptions struct {
+	// DisableServiceConfig indicates whether a resolver implementation should
+	// fetch service config data.
 	DisableServiceConfig bool
+	// DialCreds is the transport credentials used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithTransportCredentials). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithCredentialsBundle). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer used by the ClientConn for dialling the
+	// target gRPC service (set via WithDialer). In cases where a name
+	// resolution service requires the same dialer, the resolver may use this
+	// field. In most cases though, it is not appropriate, and this field may
+	// be ignored.
+	Dialer func(context.Context, string) (net.Conn, error)
 }
 
 // State contains the current Resolver state relevant to the ClientConn.
 type State struct {
-	Addresses []Address // Resolved addresses for the target
-	// ServiceConfig is the parsed service config; obtained from
-	// serviceconfig.Parse.
-	ServiceConfig serviceconfig.Config
+	// Addresses is the latest set of resolved addresses for the target.
+	Addresses []Address
+
+	// ServiceConfig contains the result from parsing the latest service
+	// config.  If it is nil, it indicates no service config is present or the
+	// resolver does not provide service configs.
+	ServiceConfig *serviceconfig.ParseResult
 
-	// TODO: add Err error
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
@@ -122,6 +175,10 @@ type State struct {
 type ClientConn interface {
 	// UpdateState updates the state of the ClientConn appropriately.
 	UpdateState(State)
+	// ReportError notifies the ClientConn that the Resolver encountered an
+	// error.  The ClientConn will notify the load balancer and begin calling
+	// ResolveNow on the Resolver with exponential backoff.
+	ReportError(error)
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
 	// The address list should be the complete list of resolved addresses.
@@ -133,6 +190,9 @@ type ClientConn interface {
 	//
 	// Deprecated: Use UpdateState instead.
 	NewServiceConfig(serviceConfig string)
+	// ParseServiceConfig parses the provided service config and returns an
+	// object that provides the parsed config.
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
 }
 
 // Target represents a target for gRPC, as specified in:
@@ -164,14 +224,14 @@ type Builder interface {
 	//
 	// gRPC dial calls Build synchronously, and fails if the returned error is
 	// not nil.
-	Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
+	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
 	// Scheme returns the scheme supported by this resolver.
 	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
 	Scheme() string
 }
 
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
 
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
@@ -180,7 +240,7 @@ type Resolver interface {
 	// again. It's just a hint, resolver can ignore this if it's not necessary.
 	//
 	// It could be called multiple times concurrently.
-	ResolveNow(ResolveNowOption)
+	ResolveNow(ResolveNowOptions)
 	// Close closes the resolver.
 	Close()
 }
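
The renamed option structs (BuildOptions, ResolveNowOptions) and the new ReportError callback show up in every resolver implementation. Below is a minimal static resolver written against the updated interfaces; the scheme, type names, and address are all placeholders.

package sketch

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder/staticResolver form a hypothetical resolver against the
// updated interfaces: Build takes resolver.BuildOptions, ResolveNow takes
// resolver.ResolveNowOptions, and updates flow through cc.UpdateState.
type staticBuilder struct{}

func (staticBuilder) Scheme() string { return "static" } // placeholder scheme

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	// Report the fixed address set once, up front.
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return r, nil
}

type staticResolver struct {
	cc resolver.ClientConn
}

// ResolveNow is only a hint; a static resolver has nothing new to report.
func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func init() {
	resolver.Register(staticBuilder{})
}

A resolver that fails to resolve would instead call cc.ReportError(err), which, as documented above, makes the ClientConn notify the balancer and retry ResolveNow with exponential backoff.
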

+ 126 - 72
vendor/google.golang.org/grpc/resolver_conn_wrapper.go

@@ -21,138 +21,192 @@ package grpc
 import (
 	"fmt"
 	"strings"
-	"sync/atomic"
+	"sync"
+	"time"
 
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 // ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConnection interface.
+// It implements resolver.ClientConn interface.
 type ccResolverWrapper struct {
-	cc       *ClientConn
-	resolver resolver.Resolver
-	addrCh   chan []resolver.Address
-	scCh     chan string
-	done     uint32 // accessed atomically; set to 1 when closed.
-	curState resolver.State
+	cc         *ClientConn
+	resolverMu sync.Mutex
+	resolver   resolver.Resolver
+	done       *grpcsync.Event
+	curState   resolver.State
+
+	pollingMu sync.Mutex
+	polling   chan struct{}
 }
 
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
-	spl := strings.SplitN(s, sep, 2)
-	if len(spl) < 2 {
-		return "", "", false
+// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
+// returns a ccResolverWrapper object which wraps the newly built resolver.
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+	ccr := &ccResolverWrapper{
+		cc:   cc,
+		done: grpcsync.NewEvent(),
 	}
-	return spl[0], spl[1], true
-}
-
-// parseTarget splits target into a struct containing scheme, authority and
-// endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func parseTarget(target string) (ret resolver.Target) {
-	var ok bool
-	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	return ret
-}
 
-// newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme and builds the resolver. The monitoring goroutine
-// for it is not started yet and can be created by calling start().
-//
-// If withResolverBuilder dial option is set, the specified resolver will be
-// used instead.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	rb := cc.dopts.resolverBuilder
-	if rb == nil {
-		return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+	var credsClone credentials.TransportCredentials
+	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+		credsClone = creds.Clone()
 	}
-
-	ccr := &ccResolverWrapper{
-		cc:     cc,
-		addrCh: make(chan []resolver.Address, 1),
-		scCh:   make(chan string, 1),
+	rbo := resolver.BuildOptions{
+		DisableServiceConfig: cc.dopts.disableServiceConfig,
+		DialCreds:            credsClone,
+		CredsBundle:          cc.dopts.copts.CredsBundle,
+		Dialer:               cc.dopts.copts.Dialer,
 	}
 
 	var err error
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
+	// We need to hold the lock here while we assign to the ccr.resolver field
+	// to guard against a data race caused by the following code path,
+	// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
+	// accessing ccr.resolver which is being assigned here.
+	ccr.resolverMu.Lock()
+	defer ccr.resolverMu.Unlock()
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
 	if err != nil {
 		return nil, err
 	}
 	return ccr, nil
 }
 
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
-	ccr.resolver.ResolveNow(o)
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+	ccr.resolverMu.Lock()
+	if !ccr.done.HasFired() {
+		ccr.resolver.ResolveNow(o)
+	}
+	ccr.resolverMu.Unlock()
 }
 
 func (ccr *ccResolverWrapper) close() {
+	ccr.resolverMu.Lock()
 	ccr.resolver.Close()
-	atomic.StoreUint32(&ccr.done, 1)
+	ccr.done.Fire()
+	ccr.resolverMu.Unlock()
 }
 
-func (ccr *ccResolverWrapper) isDone() bool {
-	return atomic.LoadUint32(&ccr.done) == 1
+// poll begins or ends asynchronous polling of the resolver based on whether
+// err is ErrBadResolverState.
+func (ccr *ccResolverWrapper) poll(err error) {
+	ccr.pollingMu.Lock()
+	defer ccr.pollingMu.Unlock()
+	if err != balancer.ErrBadResolverState {
+		// stop polling
+		if ccr.polling != nil {
+			close(ccr.polling)
+			ccr.polling = nil
+		}
+		return
+	}
+	if ccr.polling != nil {
+		// already polling
+		return
+	}
+	p := make(chan struct{})
+	ccr.polling = p
+	go func() {
+		for i := 0; ; i++ {
+			ccr.resolveNow(resolver.ResolveNowOptions{})
+			t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
+			select {
+			case <-p:
+				t.Stop()
+				return
+			case <-ccr.done.Done():
+				// Resolver has been closed.
+				t.Stop()
+				return
+			case <-t.C:
+				select {
+				case <-p:
+					return
+				default:
+				}
+				// Timer expired; re-resolve.
+			}
+		}
+	}()
 }
 
 func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
-	grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
+	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(s)
 	}
-	ccr.cc.updateResolverState(s)
 	ccr.curState = s
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ReportError(err error) {
+	if ccr.done.HasFired() {
+		return
+	}
+	channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+	ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
 }
 
 // NewAddress is called by the resolver implementation to send addresses to gRPC.
 func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
-	grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
+	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
 	if channelz.IsOn() {
 		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
 	}
 	ccr.curState.Addresses = addrs
-	ccr.cc.updateResolverState(ccr.curState)
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
 }
 
 // NewServiceConfig is called by the resolver implementation to send service
 // configs to gRPC.
 func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
-	if ccr.isDone() {
+	if ccr.done.HasFired() {
 		return
 	}
-	grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-	c, err := parseServiceConfig(sc)
-	if err != nil {
+	channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
+	if ccr.cc.dopts.disableServiceConfig {
+		channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
+		return
+	}
+	scpr := parseServiceConfig(sc)
+	if scpr.Err != nil {
+		channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
+		ccr.poll(balancer.ErrBadResolverState)
 		return
 	}
 	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
 	}
-	ccr.curState.ServiceConfig = c
-	ccr.cc.updateResolverState(ccr.curState)
+	ccr.curState.ServiceConfig = scpr
+	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
+}
+
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+	return parseServiceConfig(scJSON)
 }
 
 func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
 	var updates []string
-	oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
-	newSC, newOK := s.ServiceConfig.(*ServiceConfig)
+	var oldSC, newSC *ServiceConfig
+	var oldOK, newOK bool
+	if ccr.curState.ServiceConfig != nil {
+		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if s.ServiceConfig != nil {
+		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+	}
 	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
 		updates = append(updates, "service config updated")
 	}
@@ -161,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
 	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
 		updates = append(updates, "resolver returned new addresses")
 	}
-	channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
+	channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
 		Desc:     fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
 		Severity: channelz.CtINFO,
 	})
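The wrapper methods above implement the resolver.ClientConn callbacks that custom resolvers use to push addresses, service config, and errors into the channel. A minimal sketch of a resolver driving those callbacks, assuming the public resolver API of this vendored gRPC version (v1.28); the "static" scheme name and the addresses are placeholders:

```go
package staticres

import "google.golang.org/grpc/resolver"

// builder registers a "static" scheme whose resolver pushes a fixed address
// list through ccResolverWrapper.UpdateState.
type builder struct{}

func (builder) Scheme() string { return "static" }

func (builder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	r.ResolveNow(resolver.ResolveNowOptions{})
	return r, nil
}

type staticResolver struct{ cc resolver.ClientConn }

func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
	// A real resolver would call r.cc.ReportError(err) on lookup failure,
	// which lands in the ReportError method shown above.
	r.cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}, {Addr: "10.0.0.2:50051"}},
	})
}

func (r *staticResolver) Close() {}

func init() { resolver.Register(builder{}) }
```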

+ 50 - 24
vendor/google.golang.org/grpc/rpc_util.go

@@ -287,13 +287,14 @@ func (o FailFastCallOption) before(c *callInfo) error {
 }
 func (o FailFastCallOption) after(c *callInfo) {}
 
-// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
-func MaxCallRecvMsgSize(s int) CallOption {
-	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
 }
 
 // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can receive.
+// size in bytes the client can receive.
 // This is an EXPERIMENTAL API.
 type MaxRecvMsgSizeCallOption struct {
 	MaxRecvMsgSize int
@@ -305,13 +306,14 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
 }
 func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
 
-// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
-func MaxCallSendMsgSize(s int) CallOption {
-	return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send.
+func MaxCallSendMsgSize(bytes int) CallOption {
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
 }
 
 // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can send.
+// size in bytes the client can send.
 // This is an EXPERIMENTAL API.
 type MaxSendMsgSizeCallOption struct {
 	MaxSendMsgSize int
@@ -648,35 +650,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
 		return nil, st.Err()
 	}
 
+	var size int
 	if pf == compressionMade {
 		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
 		// use this decompressor as the default.
 		if dc != nil {
 			d, err = dc.Do(bytes.NewReader(d))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
+			size = len(d)
 		} else {
-			dcReader, err := compressor.Decompress(bytes.NewReader(d))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
-			// Read from LimitReader with limit max+1. So if the underlying
-			// reader is over limit, the result will be bigger than max.
-			d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-			if err != nil {
-				return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-			}
+			d, size, err = decompress(compressor, d, maxReceiveMessageSize)
 		}
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+		}
+	} else {
+		size = len(d)
 	}
-	if len(d) > maxReceiveMessageSize {
+	if size > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
 	}
 	return d, nil
 }
 
+// decompress uses the given compressor to decompress d, returning the data and its size.
+// If the decompressed data would exceed maxReceiveMessageSize, only the size is returned.
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
+	dcReader, err := compressor.Decompress(bytes.NewReader(d))
+	if err != nil {
+		return nil, 0, err
+	}
+	if sizer, ok := compressor.(interface {
+		DecompressedSize(compressedBytes []byte) int
+	}); ok {
+		if size := sizer.DecompressedSize(d); size >= 0 {
+			if size > maxReceiveMessageSize {
+				return nil, size, nil
+			}
+			// size is used as an estimate to size the buffer, but we
+			// will read more data if available.
+			// +MinRead so ReadFrom will not reallocate if size is correct.
+			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+			return buf.Bytes(), int(bytesRead), err
+		}
+	}
+	// Read from LimitReader with limit max+1. So if the underlying
+	// reader is over limit, the result will be bigger than max.
+	d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	return d, len(d), err
+}
+
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
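The decompress helper above probes for an optional DecompressedSize method on the compressor so oversized messages can be rejected before they are inflated. A hedged sketch of a wrapper advertising that method; the gzip ISIZE-trailer heuristic is an assumption that only holds for single-member gzip streams under 4 GiB, and falls back to -1 (unknown) otherwise:

```go
package sizedgzip

import (
	"encoding/binary"

	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// sizedGzip wraps the registered gzip compressor and adds the optional
// DecompressedSize method that the decompress helper looks for.
type sizedGzip struct {
	encoding.Compressor
}

// New returns the wrapped compressor, e.g. for encoding.RegisterCompressor.
func New() encoding.Compressor {
	return sizedGzip{Compressor: encoding.GetCompressor("gzip")}
}

func (c sizedGzip) DecompressedSize(compressedBytes []byte) int {
	if len(compressedBytes) < 4 {
		return -1 // size unknown; decompress falls back to the LimitReader path
	}
	// A gzip stream stores its uncompressed length modulo 2^32 in the final
	// four little-endian bytes (ISIZE).
	return int(binary.LittleEndian.Uint32(compressedBytes[len(compressedBytes)-4:]))
}
```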
@@ -848,7 +873,7 @@ type channelzData struct {
 
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used.  The latest
-// support package version is 5.
+// support package version is 6.
 //
 // Older versions are kept for compatibility. They may be removed if
 // compatibility cannot be maintained.
@@ -858,6 +883,7 @@ const (
 	SupportPackageIsVersion3 = true
 	SupportPackageIsVersion4 = true
 	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
 )
 
 const grpcUA = "grpc-go/" + Version
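For reference, a dial-time sketch using the byte-denominated call options documented above; the target address and the 16 MiB limits are placeholders for the example only:

```go
package clientopts

import "google.golang.org/grpc"

func dial() (*grpc.ClientConn, error) {
	const maxMsgBytes = 16 * 1024 * 1024 // both options are expressed in bytes
	return grpc.Dial("example.internal:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(maxMsgBytes),
			grpc.MaxCallSendMsgSize(maxMsgBytes),
		),
	)
}
```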

+ 202 - 72
vendor/google.golang.org/grpc/server.go

@@ -116,6 +116,8 @@ type serverOptions struct {
 	dc                    Decompressor
 	unaryInt              UnaryServerInterceptor
 	streamInt             StreamServerInterceptor
+	chainUnaryInts        []UnaryServerInterceptor
+	chainStreamInts       []StreamServerInterceptor
 	inTapHandle           tap.ServerInHandle
 	statsHandler          stats.Handler
 	maxConcurrentStreams  uint32
@@ -130,6 +132,7 @@ type serverOptions struct {
 	readBufferSize        int
 	connectionTimeout     time.Duration
 	maxHeaderListSize     *uint32
+	headerTableSize       *uint32
 }
 
 var defaultServerOptions = serverOptions{
@@ -310,6 +313,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
 	})
 }
 
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
+
 // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
 // server. Only one stream interceptor can be installed.
 func StreamInterceptor(i StreamServerInterceptor) ServerOption {
@@ -321,6 +334,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
 	})
 }
 
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for stream RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
 // InTapHandle returns a ServerOption that sets the tap handle for all the server
 // transport to be created. Only one can be installed.
 func InTapHandle(h tap.ServerInHandle) ServerOption {
@@ -343,8 +366,8 @@ func StatsHandler(h stats.Handler) ServerOption {
 // unknown service handler. The provided method is a bidi-streaming RPC service
 // handler that will be invoked instead of returning the "unimplemented" gRPC
 // error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
 func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.unknownStreamDesc = &StreamDesc{
@@ -377,6 +400,16 @@ func MaxHeaderListSize(s uint32) ServerOption {
 	})
 }
 
+// HeaderTableSize returns a ServerOption that sets the size of the dynamic
+// header table for each stream.
+//
+// This API is EXPERIMENTAL.
+func HeaderTableSize(s uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.headerTableSize = &s
+	})
+}
+
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
@@ -393,6 +426,8 @@ func NewServer(opt ...ServerOption) *Server {
 		done:   grpcsync.NewEvent(),
 		czData: new(channelzData),
 	}
+	chainUnaryServerInterceptors(s)
+	chainStreamServerInterceptors(s)
 	s.cv = sync.NewCond(&s.mu)
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
@@ -647,7 +682,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
 			s.mu.Lock()
 			s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
 			s.mu.Unlock()
-			grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
+			channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
 			rawConn.Close()
 		}
 		rawConn.SetDeadline(time.Time{})
@@ -686,6 +721,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		ReadBufferSize:        s.opts.readBufferSize,
 		ChannelzParentID:      s.channelzID,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
+		HeaderTableSize:       s.opts.headerTableSize,
 	}
 	st, err := transport.NewServerTransport("http2", c, config)
 	if err != nil {
@@ -693,7 +729,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
 		s.mu.Unlock()
 		c.Close()
-		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
+		channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
 		return nil
 	}
 
@@ -832,12 +868,12 @@ func (s *Server) incrCallsFailed() {
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
-		grpclog.Errorln("grpc: server failed to encode response: ", err)
+		channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
 		return err
 	}
 	compData, err := compress(data, cp, comp)
 	if err != nil {
-		grpclog.Errorln("grpc: server failed to compress response: ", err)
+		channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
 		return err
 	}
 	hdr, payload := msgHeader(data, compData)
@@ -852,42 +888,93 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
 	return err
 }
 
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
-	if channelz.IsOn() {
-		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
+// chainUnaryServerInterceptors chains all unary server interceptors into one.
+func chainUnaryServerInterceptors(s *Server) {
+	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainUnaryInts
+	if s.opts.unaryInt != nil {
+		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
 	}
+
+	var chainedInt UnaryServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+			return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
+		}
+	}
+
+	s.opts.unaryInt = chainedInt
+}
+
+// getChainUnaryHandler recursively generates the chained UnaryHandler
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+
+	return func(ctx context.Context, req interface{}) (interface{}, error) {
+		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
+
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
 	sh := s.opts.statsHandler
-	if sh != nil {
-		beginTime := time.Now()
-		begin := &stats.Begin{
-			BeginTime: beginTime,
+	if sh != nil || trInfo != nil || channelz.IsOn() {
+		if channelz.IsOn() {
+			s.incrCallsStarted()
 		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
+		var statsBegin *stats.Begin
+		if sh != nil {
+			beginTime := time.Now()
+			statsBegin = &stats.Begin{
 				BeginTime: beginTime,
-				EndTime:   time.Now(),
 			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
-	}
-	if trInfo != nil {
-		defer trInfo.tr.Finish()
-		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+			sh.HandleRPC(stream.Context(), statsBegin)
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		}
+		// The deferred error handling for tracing, stats handler and channelz are
+		// combined into one function to reduce stack usage -- a defer takes ~56-64
+		// bytes on the stack, so overflowing the stack will require a stack
+		// re-allocation, which is expensive.
+		//
+		// To maintain behavior similar to separate deferred statements, statements
+		// should be executed in the reverse order. That is, tracing first, stats
+		// handler second, and channelz last. Note that panics *within* defers will
+		// lead to different behavior, but that's an acceptable compromise; that
+		// would be undefined behavior territory anyway.
 		defer func() {
-			if err != nil && err != io.EOF {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
+			if trInfo != nil {
+				if err != nil && err != io.EOF {
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					trInfo.tr.SetError()
+				}
+				trInfo.tr.Finish()
+			}
+
+			if sh != nil {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(stream.Context(), end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
 			}
 		}()
 	}
@@ -960,7 +1047,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if err != nil {
 		if st, ok := status.FromError(err); ok {
 			if e := t.WriteStatus(stream, st); e != nil {
-				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
 			}
 		}
 		return err
@@ -1005,7 +1092,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			trInfo.tr.SetError()
 		}
 		if e := t.WriteStatus(stream, appStatus); e != nil {
-			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+			channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		if binlog != nil {
 			if h, _ := stream.Header(); h.Len() > 0 {
@@ -1032,9 +1119,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			// The entire stream is done (for unary RPC only).
 			return err
 		}
-		if s, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, s); e != nil {
-				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+		if sts, ok := status.FromError(err); ok {
+			if e := t.WriteStatus(stream, sts); e != nil {
+				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 			}
 		} else {
 			switch st := err.(type) {
@@ -1084,34 +1171,52 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	return err
 }
 
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainStreamInts
+	if s.opts.streamInt != nil {
+		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+	}
+
+	var chainedInt StreamServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+			return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+		}
+	}
+
+	s.opts.streamInt = chainedInt
+}
+
+// getChainStreamHandler recursively generates the chained StreamHandler
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+
+	return func(srv interface{}, ss ServerStream) error {
+		return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
+
 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
 	}
 	sh := s.opts.statsHandler
+	var statsBegin *stats.Begin
 	if sh != nil {
 		beginTime := time.Now()
-		begin := &stats.Begin{
+		statsBegin = &stats.Begin{
 			BeginTime: beginTime,
 		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
-				BeginTime: beginTime,
-				EndTime:   time.Now(),
-			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
+		sh.HandleRPC(stream.Context(), statsBegin)
 	}
 	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
 	ss := &serverStream{
@@ -1126,6 +1231,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		statsHandler:          sh,
 	}
 
+	if sh != nil || trInfo != nil || channelz.IsOn() {
+		// See comment in processUnaryRPC on defers.
+		defer func() {
+			if trInfo != nil {
+				ss.mu.Lock()
+				if err != nil && err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+				ss.trInfo.tr.Finish()
+				ss.trInfo.tr = nil
+				ss.mu.Unlock()
+			}
+
+			if sh != nil {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(stream.Context(), end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+
 	ss.binlog = binarylog.GetMethodLogger(stream.Method())
 	if ss.binlog != nil {
 		md, _ := metadata.FromIncomingContext(ctx)
@@ -1179,16 +1319,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		defer func() {
-			ss.mu.Lock()
-			if err != nil && err != io.EOF {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				ss.trInfo.tr.SetError()
-			}
-			ss.trInfo.tr.Finish()
-			ss.trInfo.tr = nil
-			ss.mu.Unlock()
-		}()
 	}
 	var appErr error
 	var server interface{}
@@ -1259,7 +1389,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
 				trInfo.tr.SetError()
 			}
-			grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+			channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 		}
 		if trInfo != nil {
 			trInfo.tr.Finish()
@@ -1300,7 +1430,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
 			trInfo.tr.SetError()
 		}
-		grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+		channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 	}
 	if trInfo != nil {
 		trInfo.tr.Finish()
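A sketch of server wiring for the new ChainUnaryInterceptor and experimental HeaderTableSize options introduced above; the interceptor bodies and the 4096-byte table size are illustrative assumptions:

```go
package serveropts

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logging runs outermost in the chain, timing runs innermost, then the real handler.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary call: %s", info.FullMethod)
	return handler(ctx, req)
}

func timing(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v", info.FullMethod, time.Since(start))
	return resp, err
}

func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.ChainUnaryInterceptor(logging, timing),
		grpc.HeaderTableSize(4096), // EXPERIMENTAL, per the option's doc comment
	)
}
```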

+ 22 - 17
vendor/google.golang.org/grpc/service_config.go

@@ -136,9 +136,9 @@ type retryPolicy struct {
 	maxAttempts int
 
 	// Exponential backoff parameters. The initial retry attempt will occur at
-	// random(0, initialBackoffMS). In general, the nth attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
 	// random(0,
-	//   min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
 	//
 	// These fields are required and must be greater than zero.
 	initialBackoff    time.Duration
@@ -261,20 +261,17 @@ type jsonSC struct {
 }
 
 func init() {
-	internal.ParseServiceConfig = func(sc string) (interface{}, error) {
-		return parseServiceConfig(sc)
-	}
+	internal.ParseServiceConfigForTesting = parseServiceConfig
 }
-
-func parseServiceConfig(js string) (*ServiceConfig, error) {
+func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	if len(js) == 0 {
-		return nil, fmt.Errorf("no JSON service config provided")
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
 	}
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
 		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-		return nil, err
+		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
 		LB:                rsc.LoadBalancingPolicy,
@@ -288,7 +285,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 			if len(lbcfg) != 1 {
 				err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
 				grpclog.Warningf(err.Error())
-				return nil, err
+				return &serviceconfig.ParseResult{Err: err}
 			}
 			var name string
 			var jsonCfg json.RawMessage
@@ -303,17 +300,25 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 				var err error
 				sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
 				if err != nil {
-					return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+					return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
 				}
 			} else if string(jsonCfg) != "{}" {
 				grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
 			}
 			break
 		}
+		if sc.lbConfig == nil {
+			// We had a loadBalancingConfig field but did not encounter a
+			// supported policy.  The config is considered invalid in this
+			// case.
+			err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+			grpclog.Warningf(err.Error())
+			return &serviceconfig.ParseResult{Err: err}
+		}
 	}
 
 	if rsc.MethodConfig == nil {
-		return &sc, nil
+		return &serviceconfig.ParseResult{Config: &sc}
 	}
 	for _, m := range *rsc.MethodConfig {
 		if m.Name == nil {
@@ -322,7 +327,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 		d, err := parseDuration(m.Timeout)
 		if err != nil {
 			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return nil, err
+			return &serviceconfig.ParseResult{Err: err}
 		}
 
 		mc := MethodConfig{
@@ -331,7 +336,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 		}
 		if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
 			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return nil, err
+			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
 			if *m.MaxRequestMessageBytes > int64(maxInt) {
@@ -356,13 +361,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
 
 	if sc.retryThrottling != nil {
 		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
-			return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
 		}
 		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
-			return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
 		}
 	}
-	return &sc, nil
+	return &serviceconfig.ParseResult{Config: &sc}
 }
 
 func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
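To show the JSON shape parseServiceConfig now turns into a ParseResult, here is a dial-time sketch; the service name, durations, status codes, and target are placeholders, and WithDefaultServiceConfig is experimental at this gRPC version:

```go
package svcconfig

import "google.golang.org/grpc"

const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "timeout": "2s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dialWithConfig() (*grpc.ClientConn, error) {
	return grpc.Dial("example.internal:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(exampleServiceConfig),
	)
}
```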

+ 7 - 14
vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go

@@ -22,27 +22,20 @@
 // This package is EXPERIMENTAL.
 package serviceconfig
 
-import (
-	"google.golang.org/grpc/internal"
-)
-
 // Config represents an opaque data structure holding a service config.
 type Config interface {
-	isConfig()
+	isServiceConfig()
 }
 
 // LoadBalancingConfig represents an opaque data structure holding a load
-// balancer config.
+// balancing config.
 type LoadBalancingConfig interface {
 	isLoadBalancingConfig()
 }
 
-// Parse parses the JSON service config provided into an internal form or
-// returns an error if the config is invalid.
-func Parse(ServiceConfigJSON string) (Config, error) {
-	c, err := internal.ParseServiceConfig(ServiceConfigJSON)
-	if err != nil {
-		return nil, err
-	}
-	return c.(Config), err
+// ParseResult contains a service config or an error.  Exactly one must be
+// non-nil.
+type ParseResult struct {
+	Config Config
+	Err    error
 }

+ 11 - 0
vendor/google.golang.org/grpc/stats/stats.go

@@ -91,6 +91,8 @@ type InHeader struct {
 	LocalAddr net.Addr
 	// Compression is the compression algorithm used for the RPC.
 	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -104,6 +106,9 @@ type InTrailer struct {
 	Client bool
 	// WireLength is the wire length of trailer.
 	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -146,6 +151,8 @@ type OutHeader struct {
 	LocalAddr net.Addr
 	// Compression is the compression algorithm used for the RPC.
 	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -159,6 +166,9 @@ type OutTrailer struct {
 	Client bool
 	// WireLength is the wire length of trailer.
 	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -176,6 +186,7 @@ type End struct {
 	EndTime time.Time
 	// Trailer contains the trailer metadata received from the server. This
 	// field is only valid if this End is from the client side.
+	// Deprecated: use Trailer in InTrailer instead.
 	Trailer metadata.MD
 	// Error is the error the RPC ended with. It is an error generated from
 	// status.Status and can be converted back to status.Status using
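The metadata fields added above can be observed with a stats.Handler; a minimal sketch follows (the handler would be installed with grpc.WithStatsHandler on the client or grpc.StatsHandler on the server, and the log destination is an assumption):

```go
package statsmd

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// mdLogger logs the header/trailer metadata now carried on stats events.
type mdLogger struct{}

func (mdLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (mdLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (mdLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (mdLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch v := s.(type) {
	case *stats.InHeader:
		log.Printf("received header metadata: %v", v.Header)
	case *stats.InTrailer:
		// Per the field comment above, Trailer is only set on the client side.
		log.Printf("received trailer metadata: %v", v.Trailer)
	}
}
```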

+ 3 - 4
vendor/google.golang.org/grpc/stream.go

@@ -31,7 +31,6 @@ import (
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/balancerload"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
@@ -488,7 +487,7 @@ func (cs *clientStream) shouldRetry(err error) error {
 	pushback := 0
 	hasPushback := false
 	if cs.attempt.s != nil {
-		if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
+		if !cs.attempt.s.TrailersOnly() {
 			return err
 		}
 
@@ -498,13 +497,13 @@ func (cs *clientStream) shouldRetry(err error) error {
 		if len(sps) == 1 {
 			var e error
 			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
-				grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
+				channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
 				cs.retryThrottler.throttle() // This counts as a failure for throttling.
 				return err
 			}
 			hasPushback = true
 		} else if len(sps) > 1 {
-			grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+			channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
 			cs.retryThrottler.throttle() // This counts as a failure for throttling.
 			return err
 		}

+ 0 - 3
vendor/google.golang.org/grpc/trace.go

@@ -41,9 +41,6 @@ func methodFamily(m string) string {
 	if i := strings.Index(m, "/"); i >= 0 {
 		m = m[:i] // remove everything from second slash
 	}
-	if i := strings.LastIndex(m, "."); i >= 0 {
-		m = m[i+1:] // cut down to last dotted component
-	}
 	return m
 }
 

+ 1 - 1
vendor/google.golang.org/grpc/version.go

@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.23.1"
+const Version = "1.28.1"