diff --git a/common/crypto/sanitize.go b/common/crypto/sanitize.go new file mode 100644 index 00000000000..4147c5d19aa --- /dev/null +++ b/common/crypto/sanitize.go @@ -0,0 +1,106 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package crypto + +import ( + "crypto/ecdsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "math/big" + "time" + + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric/bccsp/utils" + "github.com/pkg/errors" +) + +// SanitizeIdentity sanitizes the signature scheme of the identity +func SanitizeIdentity(identity []byte) ([]byte, error) { + sID := &msp.SerializedIdentity{} + if err := proto.Unmarshal(identity, sID); err != nil { + return nil, errors.Wrapf(err, "failed unmarshaling identity %s", string(identity)) + } + + der, _ := pem.Decode(sID.IdBytes) + if der == nil { + return nil, errors.Errorf("failed to PEM decode identity bytes: %s", string(sID.IdBytes)) + } + cert, err := x509.ParseCertificate(der.Bytes) + if err != nil { + return nil, errors.Wrapf(err, "failed parsing certificate %s", string(sID.IdBytes)) + } + + r, s, err := utils.UnmarshalECDSASignature(cert.Signature) + if err != nil { + return nil, errors.Wrapf(err, "failed unmarshaling ECDSA signature on identity: %s", string(sID.IdBytes)) + } + + // We assume that the consenter and the CA use the same signature scheme. + curveOrderUsedByCryptoGen := cert.PublicKey.(*ecdsa.PublicKey).Curve.Params().N + halfOrder := new(big.Int).Rsh(curveOrderUsedByCryptoGen, 1) + // Low S, nothing to do here! + if s.Cmp(halfOrder) != 1 { + return identity, nil + } + // Else it's high-S, so shift it below half the order. 
+ s.Sub(curveOrderUsedByCryptoGen, s) + + var newCert certificate + _, err = asn1.Unmarshal(cert.Raw, &newCert) + if err != nil { + return nil, errors.Wrapf(err, "failed unmarshaling certificate") + } + + newSig, err := utils.MarshalECDSASignature(r, s) + if err != nil { + return nil, errors.Wrapf(err, "failed marshaling ECDSA signature") + } + newCert.SignatureValue = asn1.BitString{Bytes: newSig, BitLength: len(newSig) * 8} + + newCert.Raw = nil + newRaw, err := asn1.Marshal(newCert) + if err != nil { + return nil, errors.Wrapf(err, "failed marshaling new certificate") + } + + sID.IdBytes = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: newRaw}) + return proto.Marshal(sID) +} + +type certificate struct { + Raw asn1.RawContent + TBSCertificate tbsCertificate + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +type tbsCertificate struct { + Raw asn1.RawContent + Version int `asn1:"optional,explicit,default:0,tag:0"` + SerialNumber *big.Int + SignatureAlgorithm pkix.AlgorithmIdentifier + Issuer asn1.RawValue + Validity validity + Subject asn1.RawValue + PublicKey publicKeyInfo + UniqueId asn1.BitString `asn1:"optional,tag:1"` + SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"` + Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"` +} + +type validity struct { + NotBefore, NotAfter time.Time +} + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} diff --git a/common/crypto/signer.go b/common/crypto/signer.go new file mode 100644 index 00000000000..f431ca94495 --- /dev/null +++ b/common/crypto/signer.go @@ -0,0 +1,68 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package crypto + +import ( + cb "github.com/hyperledger/fabric-protos-go/common" +) + +// LocalSigner is a temporary stub interface which will be implemented by the local MSP +type LocalSigner interface { + SignatureHeaderMaker + Signer +} + +// Signer signs messages +type Signer interface { + // Sign a message and return the signature over the digest, or error on failure + Sign(message []byte) ([]byte, error) +} + +// IdentitySerializer serializes identities +type IdentitySerializer interface { + // Serialize converts an identity to bytes + Serialize() ([]byte, error) +} + +// SignatureHeaderMaker creates a new SignatureHeader +type SignatureHeaderMaker interface { + // NewSignatureHeader creates a SignatureHeader with the correct signing identity and a valid nonce + NewSignatureHeader() (*cb.SignatureHeader, error) +} + +// SignatureHeaderCreator creates signature headers +type SignatureHeaderCreator struct { + SignerSupport +} + +// SignerSupport implements the needed support for LocalSigner +type SignerSupport interface { + Signer + IdentitySerializer +} + +// NewSignatureHeaderCreator creates new signature headers +func NewSignatureHeaderCreator(ss SignerSupport) *SignatureHeaderCreator { + return &SignatureHeaderCreator{ss} +} + +// NewSignatureHeader creates a SignatureHeader with the correct signing identity and a valid nonce +func (bs *SignatureHeaderCreator) NewSignatureHeader() (*cb.SignatureHeader, error) { + creator, err := bs.Serialize() + if err != nil { + return nil, err + } + nonce, err := GetRandomNonce() + if err != nil { + return nil, err + } + + return &cb.SignatureHeader{ + Creator: creator, + Nonce: nonce, + }, nil +} diff --git a/docs/source/metrics_reference.rst b/docs/source/metrics_reference.rst index 28fa74b856a..b63466d80ab 100644 --- a/docs/source/metrics_reference.rst +++ b/docs/source/metrics_reference.rst @@ -85,6 +85,16 @@ The following orderer metrics are exported for consumption by 
Prometheus. +----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ | consensus_etcdraft_snapshot_block_number | gauge | The block number of the latest snapshot. | channel | | +----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_smartbft_cluster_size | gauge | Number of nodes in this channel. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_smartbft_committed_block_number | gauge | The number of the latest committed block. | channel | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_smartbft_is_leader | gauge | The leadership status of the current node according to the | channel | | +| | | latest committed block: 1 if it is the leader else 0. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ +| consensus_smartbft_leader_id | gauge | The id of the current leader according to the latest | channel | | +| | | committed block. | | | ++----------------------------------------------+-----------+------------------------------------------------------------+-----------+--------------------------------------------------------------------+ | deliver_blocks_sent | counter | The number of blocks sent by the deliver service. 
| channel | | | | | +-----------+--------------------------------------------------------------------+ | | | | filtered | | @@ -238,6 +248,16 @@ associated with the metric. +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | consensus.etcdraft.snapshot_block_number.%{channel} | gauge | The block number of the latest snapshot. | +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ +| consensus.smartbft.cluster_size.%{channel} | gauge | Number of nodes in this channel. | ++---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ +| consensus.smartbft.committed_block_number.%{channel} | gauge | The number of the latest committed block. | ++---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ +| consensus.smartbft.is_leader.%{channel} | gauge | The leadership status of the current node according to the | +| | | latest committed block: 1 if it is the leader else 0. | ++---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ +| consensus.smartbft.leader_id.%{channel} | gauge | The id of the current leader according to the latest | +| | | committed block. | ++---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | deliver.blocks_sent.%{channel}.%{filtered}.%{data_type} | counter | The number of blocks sent by the deliver service. 
| +---------------------------------------------------------------------------+-----------+------------------------------------------------------------+ | deliver.requests_completed.%{channel}.%{filtered}.%{data_type}.%{success} | counter | The number of deliver requests that have been completed. | diff --git a/go.mod b/go.mod index f8b16e6d9c5..f4f209c2962 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( code.cloudfoundry.org/clock v1.0.0 github.com/IBM/idemix v0.0.0-20220112103229-701e7610d405 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible + github.com/SmartBFT-Go/consensus v0.3.0 github.com/VictoriaMetrics/fastcache v1.9.0 github.com/bits-and-blooms/bitset v1.2.1 github.com/cheggaaa/pb v1.0.29 @@ -19,7 +20,7 @@ require ( github.com/hyperledger/fabric-chaincode-go v0.0.0-20220713164125-8f0791c989d7 github.com/hyperledger/fabric-config v0.1.0 github.com/hyperledger/fabric-lib-go v1.0.0 - github.com/hyperledger/fabric-protos-go v0.0.0-20220827195505-ce4c067a561d + github.com/hyperledger/fabric-protos-go v0.0.0-20221109160343-add83d6f2564 github.com/kr/pretty v0.3.0 github.com/miekg/pkcs11 v1.1.1 github.com/mitchellh/mapstructure v1.4.3 @@ -37,7 +38,7 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.1 go.etcd.io/etcd/raft/v3 v3.5.1 go.etcd.io/etcd/server/v3 v3.5.1 - go.uber.org/zap v1.17.0 + go.uber.org/zap v1.19.0 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 golang.org/x/tools v0.1.2 google.golang.org/grpc v1.47.0 @@ -102,6 +103,7 @@ require ( go.uber.org/multierr v1.6.0 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index d8e010342db..c21ea1e548b 100644 --- a/go.sum +++ b/go.sum @@ -34,6 
+34,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/SmartBFT-Go/consensus v0.3.0 h1:C7PHKU6K6DWpMpK93asWRcX/yqjJ38oYNl57JcJIqsY= +github.com/SmartBFT-Go/consensus v0.3.0/go.mod h1:8lSsp7HiVCp7GKEstTrfvGMb7xuVz8jxknd0C7Au8HE= github.com/VictoriaMetrics/fastcache v1.9.0 h1:oMwsS6c8abz98B7ytAewQ7M1ZN/Im/iwKoE1euaFvhs= github.com/VictoriaMetrics/fastcache v1.9.0/go.mod h1:otoTS3xu+6IzF/qByjqzjp3rTuzM3Qf0ScU1UTj97iU= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= @@ -60,6 +62,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -305,8 +309,8 @@ github.com/hyperledger/fabric-lib-go v1.0.0/go.mod h1:H362nMlunurmHwkYqR5uHL2UDW github.com/hyperledger/fabric-protos-go v0.0.0-20200424173316-dd554ba3746e/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= github.com/hyperledger/fabric-protos-go v0.0.0-20210911123859-041d13f0980c/go.mod 
h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= github.com/hyperledger/fabric-protos-go v0.0.0-20220516090339-9685156fada6/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= -github.com/hyperledger/fabric-protos-go v0.0.0-20220827195505-ce4c067a561d h1:Dk7Z9MjzZmz+pkpC7KbH6c3A9PEN9youAIjlMJw58ro= -github.com/hyperledger/fabric-protos-go v0.0.0-20220827195505-ce4c067a561d/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= +github.com/hyperledger/fabric-protos-go v0.0.0-20221109160343-add83d6f2564 h1:yADGk0j7qovuViO42FnAGJv7+5faoM3SOl4PlMQUGmU= +github.com/hyperledger/fabric-protos-go v0.0.0-20221109160343-add83d6f2564/go.mod h1:xVYTjK4DtZRBxZ2D9aE4y6AbLaPwue2o/criQyQbVD0= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -540,6 +544,7 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -609,6 +614,7 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= 
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -620,9 +626,11 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -649,6 +657,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -700,8 +710,10 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -802,6 +814,7 @@ golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= diff --git a/internal/pkg/comm/config.go b/internal/pkg/comm/config.go index e85f9aa0057..f9e3a579bcf 100644 --- a/internal/pkg/comm/config.go +++ b/internal/pkg/comm/config.go @@ -153,6 +153,12 @@ func (cc ClientConfig) Dial(address string) (*grpc.ClientConn, error) { return conn, nil } +// Clone clones this ClientConfig +func (cc ClientConfig) Clone() ClientConfig { + shallowClone := cc + return shallowClone +} + // SecureOptions defines the TLS security parameters for a GRPCServer or // GRPCClient instance. type SecureOptions struct { diff --git a/orderer/common/cluster/util.go b/orderer/common/cluster/util.go index fb73352c792..c6690cb2f73 100644 --- a/orderer/common/cluster/util.go +++ b/orderer/common/cluster/util.go @@ -944,3 +944,55 @@ func SHA256Digest(data []byte) []byte { hash := sha256.Sum256(data) return hash[:] } + +// VerifyBlocksBFT verifies the given consecutive sequence of blocks is valid, always verifies signature, +// and returns nil if it's valid, else an error. 
+func VerifyBlocksBFT(blockBuff []*common.Block, signatureVerifier BlockVerifier) error { + return verifyBlockSequence(blockBuff, signatureVerifier, true) +} + +func verifyBlockSequence(blockBuff []*common.Block, signatureVerifier BlockVerifier, alwaysCheckSig bool) error { + if len(blockBuff) == 0 { + return errors.New("buffer is empty") + } + // First, we verify that the block hash in every block is: + // Equal to the hash in the header + // Equal to the previous hash in the succeeding block + for i := range blockBuff { + if err := VerifyBlockHash(i, blockBuff); err != nil { + return err + } + } + + var config *common.ConfigEnvelope + var isLastBlockConfigBlock bool + // Verify all configuration blocks that are found inside the block batch, + // with the configuration that was committed (nil) or with one that is picked up + // during iteration over the block batch. + for _, block := range blockBuff { + configFromBlock, err := ConfigFromBlock(block) + if err == errNotAConfig && !alwaysCheckSig { + isLastBlockConfigBlock = false + continue + } + if err != nil && !alwaysCheckSig { + return err + } + // The block is a configuration block, so verify it + if err := VerifyBlockSignature(block, signatureVerifier, config); err != nil { + return err + } + config = configFromBlock + isLastBlockConfigBlock = true + } + + // Verify the last block's signature + lastBlock := blockBuff[len(blockBuff)-1] + + // If last block is a config block, we verified it using the policy of the previous block, so it's valid. 
+ if isLastBlockConfigBlock { + return nil + } + + return VerifyBlockSignature(lastBlock, signatureVerifier, config) +} diff --git a/orderer/common/localconfig/config.go b/orderer/common/localconfig/config.go index b0cdf0f9900..52619d8092d 100644 --- a/orderer/common/localconfig/config.go +++ b/orderer/common/localconfig/config.go @@ -359,3 +359,8 @@ func translateCAs(configDir string, certificateAuthorities []string) []string { } return results } + +// Consensus indicates the orderer type. +type Consensus struct { + Type string `yaml:"type,omitempty"` +} diff --git a/orderer/common/multichannel/registrar.go b/orderer/common/multichannel/registrar.go index 7bdd2213737..526173f0a36 100644 --- a/orderer/common/multichannel/registrar.go +++ b/orderer/common/multichannel/registrar.go @@ -1149,3 +1149,28 @@ func channelNameFromConfigTx(configtx *cb.Envelope) (string, error) { return chdr.ChannelId, nil } + +func (r *Registrar) ApplyFilters(channel string, env *cb.Envelope) error { + r.lock.RLock() + cs, exists := r.chains[channel] + r.lock.RUnlock() + + if !exists { + // This is for the system channel + return msgprocessor.CreateSystemChannelFilters(r.config, r, r.systemChannel, r.systemChannel.MetadataValidator).Apply(env) + } + + return msgprocessor.CreateStandardChannelFilters(cs, r.config).Apply(env) +} + +func (r *Registrar) ProposeConfigUpdate(channel string, configtx *cb.Envelope) (*cb.ConfigEnvelope, error) { + r.lock.RLock() + cs, exists := r.chains[channel] + r.lock.RUnlock() + + if !exists { + return nil, errors.Errorf("channel %s doesn't exist", channel) + } + + return cs.ProposeConfigUpdate(configtx) +} diff --git a/orderer/common/onboarding/onboarding.go b/orderer/common/onboarding/onboarding.go index 14cc9a1c1f7..e1d599f7068 100644 --- a/orderer/common/onboarding/onboarding.go +++ b/orderer/common/onboarding/onboarding.go @@ -435,3 +435,23 @@ func ValidateBootstrapBlock(block *common.Block, bccsp bccsp.BCCSP) error { } return nil } + +// ConsensusType 
returns the consensus type from the given genesis block. +func ConsensusType(genesisBlock *common.Block, bccsp bccsp.BCCSP) string { + if genesisBlock == nil || genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 { + logger.Fatalf("Empty genesis block") + } + env := &common.Envelope{} + if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil { + logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err) + } + bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp) + if err != nil { + logger.Fatalf("Failed creating bundle from the genesis block: %v", err) + } + ordConf, exists := bundle.OrdererConfig() + if !exists { + logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block") + } + return ordConf.ConsensusType() +} diff --git a/orderer/common/server/main.go b/orderer/common/server/main.go index 9e91e8809f6..61dc8a018e6 100644 --- a/orderer/common/server/main.go +++ b/orderer/common/server/main.go @@ -49,7 +49,9 @@ import ( "github.com/hyperledger/fabric/orderer/common/onboarding" "github.com/hyperledger/fabric/orderer/consensus" "github.com/hyperledger/fabric/orderer/consensus/etcdraft" + "github.com/hyperledger/fabric/orderer/consensus/smartbft" "github.com/hyperledger/fabric/protoutil" + "github.com/mitchellh/mapstructure" "go.uber.org/zap/zapcore" "google.golang.org/grpc" "gopkg.in/alecthomas/kingpin.v2" @@ -800,6 +802,7 @@ func initializeMultichannelRegistrar( bccsp bccsp.BCCSP, callbacks ...channelconfig.BundleActor, ) *multichannel.Registrar { + dpmr := &DynamicPolicyManagerRegistry{} registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, bccsp, clusterDialer, callbacks...) 
consenters := map[string]consensus.Consenter{} @@ -807,10 +810,37 @@ func initializeMultichannelRegistrar( if conf.General.BootstrapMethod == "file" || conf.General.BootstrapMethod == "none" { if bootstrapBlock != nil && isClusterType(bootstrapBlock, bccsp) { // with a system channel - initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, repInitiator, srvConf, srv, registrar, metricsProvider, bccsp) + consenterType := onboarding.ConsensusType(bootstrapBlock, bccsp) + switch consenterType { + case "etcdraft": + initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, repInitiator, srvConf, srv, registrar, metricsProvider, bccsp) + case "smartbft": + initializeSmartBFTConsenter(signer, dpmr, consenters, conf, lf, clusterDialer, bootstrapBlock, repInitiator, srvConf, srv, registrar, metricsProvider, bccsp) + default: + logger.Panicf("Unknown cluster type consenter") + } } else if bootstrapBlock == nil { // without a system channel: assume cluster type, InactiveChainRegistry == nil, no go-routine. 
- consenters["etcdraft"] = etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, nil, metricsProvider, bccsp) + consenterType := "smartbft" + bootstrapBlock := initSystemChannelWithJoinBlock(conf, bccsp, lf) + if bootstrapBlock != nil { + consenterType = onboarding.ConsensusType(bootstrapBlock, bccsp) + } else { + // load consensus type from orderer config + var consensusConfig localconfig.Consensus + if err := mapstructure.Decode(conf.Consensus, &consensusConfig); err == nil && consensusConfig.Type != "" { + consenterType = consensusConfig.Type + } + } + // the orderer can start without channels at all and have an initialized cluster type consenter + switch consenterType { + case "etcdraft": + consenters["etcdraft"] = etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, nil, metricsProvider, bccsp) + case "smartbft": + consenters["smartbft"] = smartbft.New(nil, dpmr.Registry(), signer, clusterDialer, conf, srvConf, srv, registrar, metricsProvider, bccsp) + default: + logger.Panicf("Unknown cluster type consenter '%s'", consenterType) + } } } @@ -845,6 +875,47 @@ func initializeEtcdraftConsenter(consenters map[string]consensus.Consenter, conf consenters["etcdraft"] = raftConsenter } +func initializeSmartBFTConsenter( + signer identity.SignerSerializer, + dpmr *DynamicPolicyManagerRegistry, + consenters map[string]consensus.Consenter, + conf *localconfig.TopLevel, + lf blockledger.Factory, + clusterDialer *cluster.PredicateDialer, + bootstrapBlock *cb.Block, + ri *onboarding.ReplicationInitiator, + srvConf comm.ServerConfig, + srv *comm.GRPCServer, + registrar *multichannel.Registrar, + metricsProvider metrics.Provider, + bccsp bccsp.BCCSP, +) *smartbft.Consenter { + systemChannelName, err := protoutil.GetChannelIDFromBlock(bootstrapBlock) + if err != nil { + logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err) + } + systemLedger, err := lf.GetOrCreate(systemChannelName) + if err != nil { + logger.Panicf("Failed obtaining 
system channel (%s) ledger: %v", systemChannelName, err) + } + getConfigBlock := func() *cb.Block { + return multichannel.ConfigBlockOrPanic(systemLedger) + } + icr := onboarding.NewInactiveChainReplicator(ri, getConfigBlock, ri.RegisterChain, conf.General.Cluster.ReplicationBackgroundRefreshInterval) + + // Use the inactiveChainReplicator as a channel lister, since it has knowledge + // of all inactive chains. + // This is to prevent us pulling the entire system chain when attempting to enumerate + // the channels in the system. + ri.ChannelLister = icr + + go icr.Run() + smartBFTConsenter := smartbft.New(icr, dpmr.Registry(), signer, clusterDialer, conf, srvConf, srv, registrar, metricsProvider, bccsp) + consenters["smartbft"] = smartBFTConsenter + + return smartBFTConsenter +} + func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System { return operations.NewSystem(operations.Options{ Options: fabhttp.Options{ diff --git a/orderer/common/server/policymanager.go b/orderer/common/server/policymanager.go new file mode 100644 index 00000000000..5ea3d37b046 --- /dev/null +++ b/orderer/common/server/policymanager.go @@ -0,0 +1,57 @@ +/* +Copyright IBM Corp. All Rights Reserved.
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package server + +import ( + "sync" + + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/common/policies" +) + +type DynamicPolicyManagerRegistry struct { + m sync.Map + Logger *flogging.FabricLogger +} + +func (dpmr *DynamicPolicyManagerRegistry) Update(bundle *channelconfig.Bundle) { + chainID := bundle.ConfigtxValidator().ChannelID() + dpmr.m.Store(chainID, bundle.PolicyManager()) +} + +func (dpmr *DynamicPolicyManagerRegistry) Registry() func(channel string) policies.Manager { + return func(channel string) policies.Manager { + return &dynamicPolicyManager{ + m: &dpmr.m, + logger: dpmr.Logger, + channel: channel, + } + } +} + +type dynamicPolicyManager struct { + channel string + m *sync.Map + logger *flogging.FabricLogger +} + +func (dpm *dynamicPolicyManager) GetPolicy(id string) (policies.Policy, bool) { + o, ok := dpm.m.Load(dpm.channel) + if !ok { + return nil, false + } + return o.(policies.Manager).GetPolicy(id) +} + +func (dpm *dynamicPolicyManager) Manager(path []string) (policies.Manager, bool) { + o, ok := dpm.m.Load(dpm.channel) + if !ok { + return nil, false + } + return o.(policies.Manager).Manager(path) +} diff --git a/orderer/consensus/etcdraft/util.go b/orderer/consensus/etcdraft/util.go index 571a013c9ec..fc4d065457e 100644 --- a/orderer/consensus/etcdraft/util.go +++ b/orderer/consensus/etcdraft/util.go @@ -465,3 +465,36 @@ func CreateConsentersMap(blockMetadata *etcdraft.BlockMetadata, configMetadata * } return consenters } + +func CreateX509VerifyOptions(ordererConfig channelconfig.Orderer) (x509.VerifyOptions, error) { + tlsRoots := x509.NewCertPool() + tlsIntermediates := x509.NewCertPool() + + for _, org := range ordererConfig.Organizations() { + rootCerts, err := parseCertificateListFromBytes(org.MSP().GetTLSRootCerts()) + if err != nil { + return x509.VerifyOptions{}, errors.Wrap(err, "parsing tls root 
certs") + } + intermediateCerts, err := parseCertificateListFromBytes(org.MSP().GetTLSIntermediateCerts()) + if err != nil { + return x509.VerifyOptions{}, errors.Wrap(err, "parsing tls intermediate certs") + } + + for _, cert := range rootCerts { + tlsRoots.AddCert(cert) + } + + for _, cert := range intermediateCerts { + tlsIntermediates.AddCert(cert) + } + } + + return x509.VerifyOptions{ + Roots: tlsRoots, + Intermediates: tlsIntermediates, + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + }, nil +} diff --git a/orderer/consensus/smartbft/assembler.go b/orderer/consensus/smartbft/assembler.go new file mode 100644 index 00000000000..1940238fe59 --- /dev/null +++ b/orderer/consensus/smartbft/assembler.go @@ -0,0 +1,194 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "encoding/asn1" + "sync/atomic" + + "github.com/SmartBFT-Go/consensus/pkg/types" + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/orderer/common/cluster" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +//go:generate mockery -dir . -name Ledger -case underscore -output mocks + +// Ledger returns the height and a block with the given number +type Ledger interface { + // Height returns the number of blocks in the ledger this channel is associated with. + Height() uint64 + + // Block returns a block with the given number, + // or nil if such a block doesn't exist. 
+ Block(number uint64) *cb.Block +} + +// Assembler is the proposal assembler +type Assembler struct { + RuntimeConfig *atomic.Value + Logger *flogging.FabricLogger + VerificationSeq func() uint64 +} + +// AssembleProposal assembles a proposal from the metadata and the request +func (a *Assembler) AssembleProposal(metadata []byte, requests [][]byte) (nextProp types.Proposal) { + rtc := a.RuntimeConfig.Load().(RuntimeConfig) + + lastConfigBlockNum := rtc.LastConfigBlock.Header.Number + lastBlock := rtc.LastBlock + + if len(requests) == 0 { + a.Logger.Panicf("Programming error, no requests in proposal") + } + batchedRequests := singleConfigTxOrSeveralNonConfigTx(requests, a.Logger) + + block := protoutil.NewBlock(lastBlock.Header.Number+1, protoutil.BlockHeaderHash(lastBlock.Header)) + block.Data = &cb.BlockData{Data: batchedRequests} + block.Header.DataHash = protoutil.BlockDataHash(block.Data) + + if protoutil.IsConfigBlock(block) { + lastConfigBlockNum = block.Header.Number + } + + block.Metadata.Metadata[cb.BlockMetadataIndex_LAST_CONFIG] = protoutil.MarshalOrPanic(&cb.Metadata{ + Value: protoutil.MarshalOrPanic(&cb.LastConfig{Index: lastConfigBlockNum}), + }) + block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = protoutil.MarshalOrPanic(&cb.Metadata{ + Value: protoutil.MarshalOrPanic(&cb.OrdererBlockMetadata{ + ConsenterMetadata: metadata, + LastConfig: &cb.LastConfig{ + Index: lastConfigBlockNum, + }, + }), + }) + + tuple := &ByteBufferTuple{ + A: protoutil.MarshalOrPanic(block.Data), + B: protoutil.MarshalOrPanic(block.Metadata), + } + + prop := types.Proposal{ + Header: protoutil.BlockHeaderBytes(block.Header), + Payload: tuple.ToBytes(), + Metadata: metadata, + VerificationSequence: int64(a.VerificationSeq()), + } + + return prop +} + +func singleConfigTxOrSeveralNonConfigTx(requests [][]byte, logger Logger) [][]byte { + // Scan until a config transaction is found + var batchedRequests [][]byte + var i int + for i < len(requests) { + currentRequest 
:= requests[i] + envelope, err := protoutil.UnmarshalEnvelope(currentRequest) + if err != nil { + logger.Panicf("Programming error, received bad envelope but should have validated it: %v", err) + continue + } + + // If we saw a config transaction, we cannot add any more transactions to the batch. + if protoutil.IsConfigTransaction(envelope) { + break + } + + // Else, it's not a config transaction, so add it to the batch. + batchedRequests = append(batchedRequests, currentRequest) + i++ + } + + // If we don't have any transaction in the batch, it is safe to assume we only + // saw a single transaction which is a config transaction. + if len(batchedRequests) == 0 { + batchedRequests = [][]byte{requests[0]} + } + + // At this point, batchedRequests contains either a single config transaction, or a few non config transactions. + return batchedRequests +} + +// LastConfigBlockFromLedgerOrPanic returns the last config block from the ledger +func LastConfigBlockFromLedgerOrPanic(ledger Ledger, logger Logger) *cb.Block { + block, err := lastConfigBlockFromLedger(ledger) + if err != nil { + logger.Panicf("Failed retrieving last config block: %v", err) + } + return block +} + +func lastConfigBlockFromLedger(ledger Ledger) (*cb.Block, error) { + lastBlockSeq := ledger.Height() - 1 + lastBlock := ledger.Block(lastBlockSeq) + if lastBlock == nil { + return nil, errors.Errorf("unable to retrieve block [%d]", lastBlockSeq) + } + lastConfigBlock, err := cluster.LastConfigBlock(lastBlock, ledger) + if err != nil { + return nil, err + } + return lastConfigBlock, nil +} + +func PreviousConfigBlockFromLedgerOrPanic(ledger Ledger, logger Logger) *cb.Block { + block, err := previousConfigBlockFromLedger(ledger) + if err != nil { + logger.Panicf("Failed retrieving previous config block: %v", err) + } + return block +} + +func previousConfigBlockFromLedger(ledger Ledger) (*cb.Block, error) { + previousBlockSeq := ledger.Height() - 2 + if ledger.Height() == 1 { + previousBlockSeq = 0 + } + 
// ByteBufferTuple is an ASN.1-serializable pair of byte slices, used to carry
// a block's data and metadata together inside a proposal payload.
type ByteBufferTuple struct {
	A []byte
	B []byte
}

// ToBytes serializes the tuple to its ASN.1 DER encoding. Marshaling a struct
// of plain byte slices cannot fail under normal conditions, so any error is
// treated as a programming error and panics.
func (bbt *ByteBufferTuple) ToBytes() []byte {
	der, err := asn1.Marshal(*bbt)
	if err != nil {
		panic(err)
	}
	return der
}

// FromBytes populates the receiver from an ASN.1 DER encoding produced by
// ToBytes, returning any decoding error.
func (bbt *ByteBufferTuple) FromBytes(raw []byte) error {
	_, err := asn1.Unmarshal(raw, bbt)
	return err
}
package smartbft

import (
	"encoding/base64"
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"time"

	smartbft "github.com/SmartBFT-Go/consensus/pkg/consensus"
	"github.com/SmartBFT-Go/consensus/pkg/types"
	"github.com/SmartBFT-Go/consensus/pkg/wal"
	"github.com/SmartBFT-Go/consensus/smartbftprotos"
	"github.com/golang/protobuf/proto"
	cb "github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/msp"
	"github.com/hyperledger/fabric/bccsp"
	"github.com/hyperledger/fabric/common/flogging"
	"github.com/hyperledger/fabric/common/policies"
	"github.com/hyperledger/fabric/orderer/common/cluster"
	"github.com/hyperledger/fabric/orderer/common/msgprocessor"
	types2 "github.com/hyperledger/fabric/orderer/common/types"
	"github.com/hyperledger/fabric/orderer/consensus"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/pkg/errors"
	"go.uber.org/zap"
)

//go:generate counterfeiter -o mocks/mock_blockpuller.go . BlockPuller

// BlockPuller is used to pull blocks from other OSN
type BlockPuller interface {
	// PullBlock fetches the block with the given sequence from a remote orderer.
	PullBlock(seq uint64) *cb.Block
	// HeightsByEndpoints reports the ledger height of each known endpoint.
	HeightsByEndpoints() (map[string]uint64, error)
	// Close releases the puller's resources.
	Close()
}

// WALConfig consensus specific configuration parameters from orderer.yaml; for SmartBFT only WALDir is relevant.
type WALConfig struct {
	WALDir            string // WAL data of the channel is stored in WALDir/<channel> — TODO confirm exact layout
	SnapDir           string // Snapshots of the channel are stored in SnapDir/<channel> — unused by SmartBFT
	EvictionSuspicion string // Duration threshold that the node samples in order to suspect its eviction from the channel.
}
+} + +// ConfigValidator interface +type ConfigValidator interface { + ValidateConfig(env *cb.Envelope) error +} + +type signerSerializer interface { + // Sign a message and return the signature over the digest, or error on failure + Sign(message []byte) ([]byte, error) + + // Serialize converts an identity to bytes + Serialize() ([]byte, error) +} + +// BFTChain implements Chain interface to wire with +// BFT smart library +type BFTChain struct { + RuntimeConfig *atomic.Value + Channel string + Config types.Configuration + BlockPuller BlockPuller + Comm cluster.Communicator + SignerSerializer signerSerializer + PolicyManager policies.Manager + Logger *flogging.FabricLogger + WALDir string + consensus *smartbft.Consensus + support consensus.ConsenterSupport + verifier *Verifier + assembler *Assembler + Metrics *Metrics + bccsp bccsp.BCCSP + + statusReportMutex sync.Mutex + consensusRelation types2.ConsensusRelation + status types2.Status +} + +// NewChain creates new BFT Smart chain +func NewChain( + cv ConfigValidator, + selfID uint64, + config types.Configuration, + walDir string, + blockPuller BlockPuller, + comm cluster.Communicator, + signerSerializer signerSerializer, + policyManager policies.Manager, + support consensus.ConsenterSupport, + metrics *Metrics, + bccsp bccsp.BCCSP, + +) (*BFTChain, error) { + requestInspector := &RequestInspector{ + ValidateIdentityStructure: func(_ *msp.SerializedIdentity) error { + return nil + }, + } + + logger := flogging.MustGetLogger("orderer.consensus.smartbft.chain").With(zap.String("channel", support.ChannelID())) + + c := &BFTChain{ + RuntimeConfig: &atomic.Value{}, + Channel: support.ChannelID(), + Config: config, + WALDir: walDir, + Comm: comm, + support: support, + SignerSerializer: signerSerializer, + PolicyManager: policyManager, + BlockPuller: blockPuller, + Logger: logger, + consensusRelation: types2.ConsensusRelationConsenter, + status: types2.StatusActive, + Metrics: &Metrics{ + ClusterSize: 
metrics.ClusterSize.With("channel", support.ChannelID()), + CommittedBlockNumber: metrics.CommittedBlockNumber.With("channel", support.ChannelID()), + IsLeader: metrics.IsLeader.With("channel", support.ChannelID()), + LeaderID: metrics.LeaderID.With("channel", support.ChannelID()), + }, + bccsp: bccsp, + } + + lastBlock := LastBlockFromLedgerOrPanic(support, c.Logger) + lastConfigBlock := LastConfigBlockFromLedgerOrPanic(support, c.Logger) + + rtc := RuntimeConfig{ + logger: logger, + id: selfID, + } + rtc, err := rtc.BlockCommitted(lastConfigBlock, bccsp) + if err != nil { + return nil, errors.Wrap(err, "failed constructing RuntimeConfig") + } + rtc, err = rtc.BlockCommitted(lastBlock, bccsp) + if err != nil { + return nil, errors.Wrap(err, "failed constructing RuntimeConfig") + } + + c.RuntimeConfig.Store(rtc) + + c.verifier = buildVerifier(cv, c.RuntimeConfig, support, requestInspector, policyManager) + c.consensus = bftSmartConsensusBuild(c, requestInspector) + + // Setup communication with list of remotes notes for the new channel + c.Comm.Configure(c.support.ChannelID(), rtc.RemoteNodes) + + if err := c.consensus.ValidateConfiguration(rtc.Nodes); err != nil { + return nil, errors.Wrap(err, "failed to verify SmartBFT-Go configuration") + } + + logger.Infof("SmartBFT-v3 is now servicing chain %s", support.ChannelID()) + + return c, nil +} + +func bftSmartConsensusBuild( + c *BFTChain, + requestInspector *RequestInspector, +) *smartbft.Consensus { + var err error + + rtc := c.RuntimeConfig.Load().(RuntimeConfig) + + // latestMetadata, err := getViewMetadataFromBlock(rtc.LastBlock) + if err != nil { + c.Logger.Panicf("Failed extracting view metadata from ledger: %v", err) + } + + var consensusWAL *wal.WriteAheadLogFile + var walInitState [][]byte + + c.Logger.Infof("Initializing a WAL for chain %s, on dir: %s", c.support.ChannelID(), c.WALDir) + consensusWAL, walInitState, err = wal.InitializeAndReadAll(c.Logger, c.WALDir, wal.DefaultOptions()) + if err != nil { 
+ c.Logger.Panicf("failed to initialize a WAL for chain %s, err %s", c.support.ChannelID(), err) + } + + clusterSize := uint64(len(rtc.Nodes)) + + // report cluster size + c.Metrics.ClusterSize.Set(float64(clusterSize)) + + sync := &Synchronizer{ + selfID: rtc.id, + BlockToDecision: c.blockToDecision, + OnCommit: c.updateRuntimeConfig, + Support: c.support, + BlockPuller: c.BlockPuller, + ClusterSize: clusterSize, + Logger: c.Logger, + LatestConfig: func() (types.Configuration, []uint64) { + rtc := c.RuntimeConfig.Load().(RuntimeConfig) + return rtc.BFTConfig, rtc.Nodes + }, + } + + channelDecorator := zap.String("channel", c.support.ChannelID()) + logger := flogging.MustGetLogger("orderer.consensus.smartbft.consensus").With(channelDecorator) + + c.assembler = &Assembler{ + RuntimeConfig: c.RuntimeConfig, + VerificationSeq: c.verifier.VerificationSequence, + Logger: flogging.MustGetLogger("orderer.consensus.smartbft.assembler").With(channelDecorator), + } + + consensus := &smartbft.Consensus{ + Config: c.Config, + Logger: logger, + Verifier: c.verifier, + Signer: &Signer{ + ID: c.Config.SelfID, + Logger: flogging.MustGetLogger("orderer.consensus.smartbft.signer").With(channelDecorator), + SignerSerializer: c.SignerSerializer, + LastConfigBlockNum: func(block *cb.Block) uint64 { + if protoutil.IsConfigBlock(block) { + return block.Header.Number + } + + return c.RuntimeConfig.Load().(RuntimeConfig).LastConfigBlock.Header.Number + }, + }, + // TODO_PARAM: SHALLOW COPY IS ISSUE HERE + // Metadata: *latestMetadata, + WAL: consensusWAL, + WALInitialContent: walInitState, // Read from WAL entries + Application: c, + Assembler: c.assembler, + RequestInspector: requestInspector, + Synchronizer: sync, + Comm: &Egress{ + RuntimeConfig: c.RuntimeConfig, + Channel: c.support.ChannelID(), + Logger: flogging.MustGetLogger("orderer.consensus.smartbft.egress").With(channelDecorator), + RPC: &cluster.RPC{ + Logger: 
flogging.MustGetLogger("orderer.consensus.smartbft.rpc").With(channelDecorator), + Channel: c.support.ChannelID(), + StreamsByType: cluster.NewStreamsByType(), + Comm: c.Comm, + Timeout: 5 * time.Minute, // Externalize configuration + }, + }, + Scheduler: time.NewTicker(time.Second).C, + ViewChangerTicker: time.NewTicker(time.Second).C, + } + + proposal, signatures := c.lastPersistedProposalAndSignatures() + if proposal != nil { + consensus.LastProposal = *proposal + consensus.LastSignatures = signatures + } + + return consensus +} + +func (c *BFTChain) submit(env *cb.Envelope, configSeq uint64) error { + reqBytes, err := proto.Marshal(env) + if err != nil { + return errors.Wrapf(err, "failed to marshal request envelope") + } + + c.Logger.Debugf("Consensus.SubmitRequest, node id %d", c.Config.SelfID) + if err := c.consensus.SubmitRequest(reqBytes); err != nil { + return errors.Wrapf(err, "failed to submit request") + } + return nil +} + +// Order accepts a message which has been processed at a given configSeq. +// If the configSeq advances, it is the responsibility of the consenter +// to revalidate and potentially discard the message +// The consenter may return an error, indicating the message was not accepted +func (c *BFTChain) Order(env *cb.Envelope, configSeq uint64) error { + seq := c.support.Sequence() + if configSeq < seq { + c.Logger.Warnf("Normal message was validated against %d, although current config seq has advanced (%d)", configSeq, seq) + if _, err := c.support.ProcessNormalMsg(env); err != nil { + return errors.Errorf("bad normal message: %s", err) + } + } + + return c.submit(env, configSeq) +} + +// Configure accepts a message which reconfigures the channel and will +// trigger an update to the configSeq if committed. The configuration must have +// been triggered by a ConfigUpdate message. 
If the config sequence advances, +// it is the responsibility of the consenter to recompute the resulting config, +// discarding the message if the reconfiguration is no longer valid. +// The consenter may return an error, indicating the message was not accepted +func (c *BFTChain) Configure(config *cb.Envelope, configSeq uint64) error { + // TODO: check configuration update validity + seq := c.support.Sequence() + if configSeq < seq { + c.Logger.Warnf("Normal message was validated against %d, although current config seq has advanced (%d)", configSeq, seq) + if configEnv, _, err := c.support.ProcessConfigMsg(config); err != nil { + return errors.Errorf("bad normal message: %s", err) + } else { + return c.submit(configEnv, configSeq) + } + } + + return c.submit(config, configSeq) +} + +// Deliver delivers proposal, writes block with transactions and metadata +func (c *BFTChain) Deliver(proposal types.Proposal, signatures []types.Signature) types.Reconfig { + block, err := ProposalToBlock(proposal) + if err != nil { + c.Logger.Panicf("failed to read proposal, err: %s", err) + } + + var sigs []*cb.MetadataSignature + var ordererBlockMetadata []byte + + var signers []uint64 + + for _, s := range signatures { + sig := &Signature{} + if err := sig.Unmarshal(s.Msg); err != nil { + c.Logger.Errorf("Failed unmarshaling signature from %d: %v", s.ID, err) + c.Logger.Errorf("Offending signature Msg: %s", base64.StdEncoding.EncodeToString(s.Msg)) + c.Logger.Errorf("Offending signature Value: %s", base64.StdEncoding.EncodeToString(s.Value)) + c.Logger.Errorf("Halting chain.") + c.Halt() + return types.Reconfig{} + } + + if ordererBlockMetadata == nil { + ordererBlockMetadata = sig.OrdererBlockMetadata + } + + sigs = append(sigs, &cb.MetadataSignature{ + // AuxiliaryInput: sig.AuxiliaryInput, + Signature: s.Value, + // We do not put a signature header when we commit the block. 
+ // Instead, we put the nonce and the identifier and at validation + // we reconstruct the signature header at runtime. + // SignatureHeader: sig.SignatureHeader, + // Nonce: sig.Nonce, + // SignerId: s.ID, + }) + + signers = append(signers, s.ID) + } + + block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = protoutil.MarshalOrPanic(&cb.Metadata{ + Value: ordererBlockMetadata, + Signatures: sigs, + }) + + var mdTotalSize int + for _, md := range block.Metadata.Metadata { + mdTotalSize += len(md) + } + + c.Logger.Infof("Delivering proposal, writing block %d with %d transactions and metadata of total size %d with signatures from %v to the ledger, node id %d", + block.Header.Number, + len(block.Data.Data), + mdTotalSize, + signers, + c.Config.SelfID) + c.Metrics.CommittedBlockNumber.Set(float64(block.Header.Number)) // report the committed block number + c.reportIsLeader() // report the leader + if protoutil.IsConfigBlock(block) { + c.support.WriteConfigBlock(block, nil) + } else { + c.support.WriteBlock(block, nil) + } + + reconfig := c.updateRuntimeConfig(block) + return reconfig +} + +// WaitReady blocks waiting for consenter to be ready for accepting new messages. +// This is useful when consenter needs to temporarily block ingress messages so +// that in-flight messages can be consumed. It could return error if consenter is +// in erroneous states. If this blocking behavior is not desired, consenter could +// simply return nil. +func (c *BFTChain) WaitReady() error { + return nil +} + +// Errored returns a channel which will close when an error has occurred. +// This is especially useful for the Deliver client, who must terminate waiting +// clients when the consenter is not up to date. +func (c *BFTChain) Errored() <-chan struct{} { + // TODO: Implement Errored + return nil +} + +// Start should allocate whatever resources are needed for staying up to date with the chain. 
+// Typically, this involves creating a thread which reads from the ordering source, passes those +// messages to a block cutter, and writes the resulting blocks to the ledger. +func (c *BFTChain) Start() { + if err := c.consensus.Start(); err != nil { + c.Logger.Panicf("Failed to start chain, aborting: %+v", err) + } + c.reportIsLeader() // report the leader +} + +// Halt frees the resources which were allocated for this Chain. +func (c *BFTChain) Halt() { + c.Logger.Infof("Shutting down chain") + c.consensus.Stop() +} + +func (c *BFTChain) blockToProposalWithoutSignaturesInMetadata(block *cb.Block) types.Proposal { + blockClone := proto.Clone(block).(*cb.Block) + if len(blockClone.Metadata.Metadata) > int(cb.BlockMetadataIndex_SIGNATURES) { + signatureMetadata := &cb.Metadata{} + // Nil out signatures because we carry them around separately in the library format. + if err := proto.Unmarshal(blockClone.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES], signatureMetadata); err != nil { + // nothing to do + c.Logger.Errorf("Error unmarshalling signature metadata from block: %s", err) + } + signatureMetadata.Signatures = nil + blockClone.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = protoutil.MarshalOrPanic(signatureMetadata) + } + prop := types.Proposal{ + Header: protoutil.BlockHeaderBytes(blockClone.Header), + Payload: (&ByteBufferTuple{ + A: protoutil.MarshalOrPanic(blockClone.Data), + B: protoutil.MarshalOrPanic(blockClone.Metadata), + }).ToBytes(), + VerificationSequence: int64(c.verifier.VerificationSequence()), + } + + if protoutil.IsConfigBlock(block) { + prop.VerificationSequence-- + } + + return prop +} + +func (c *BFTChain) blockToDecision(block *cb.Block) *types.Decision { + proposal := c.blockToProposalWithoutSignaturesInMetadata(block) + if block.Header.Number == 0 { + return &types.Decision{ + Proposal: proposal, + } + } + + signatureMetadata := &cb.Metadata{} + if err := 
proto.Unmarshal(block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES], signatureMetadata); err != nil { + c.Logger.Panicf("Failed unmarshaling signatures from block metadata: %v", err) + } + + ordererMDFromBlock := &cb.OrdererBlockMetadata{} + if err := proto.Unmarshal(signatureMetadata.Value, ordererMDFromBlock); err != nil { + c.Logger.Panicf("Failed unmarshaling OrdererBlockMetadata from block signature metadata: %v", err) + } + + proposal.Metadata = ordererMDFromBlock.ConsenterMetadata + + var signatures []types.Signature + for _, sigMD := range signatureMetadata.Signatures { + // id := sigMD.SignerId + sig := &Signature{ + // Nonce: sigMD.Nonce, + BlockHeader: protoutil.BlockHeaderBytes(block.Header), + OrdererBlockMetadata: signatureMetadata.Value, + // AuxiliaryInput: sigMD.AuxiliaryInput, + } + /* prpf := &smartbftprotos.PreparesFrom{} + if err := proto.Unmarshal(sigMD.AuxiliaryInput, prpf); err != nil { + c.Logger.Errorf("Failed unmarshaling auxiliary data") + continue + } + c.Logger.Infof("AuxiliaryInput[%d]: %v", id, prpf) */ + signatures = append(signatures, types.Signature{ + Msg: sig.Marshal(), + Value: sigMD.Signature, + // ID: id, + }) + } + + return &types.Decision{ + Signatures: signatures, + Proposal: proposal, + } +} + +// HandleMessage handles the message from the sender +func (c *BFTChain) HandleMessage(sender uint64, m *smartbftprotos.Message) { + c.Logger.Debugf("Message from %d", sender) + c.consensus.HandleMessage(sender, m) +} + +// HandleRequest handles the request from the sender +func (c *BFTChain) HandleRequest(sender uint64, req []byte) { + c.Logger.Debugf("HandleRequest from %d", sender) + c.consensus.SubmitRequest(req) +} + +func (c *BFTChain) updateRuntimeConfig(block *cb.Block) types.Reconfig { + prevRTC := c.RuntimeConfig.Load().(RuntimeConfig) + newRTC, err := prevRTC.BlockCommitted(block, c.bccsp) + if err != nil { + c.Logger.Errorf("Failed constructing RuntimeConfig from block %d, halting chain", block.Header.Number) + 
c.Halt() + return types.Reconfig{} + } + c.RuntimeConfig.Store(newRTC) + if protoutil.IsConfigBlock(block) { + c.Comm.Configure(c.Channel, newRTC.RemoteNodes) + } + + membershipDidNotChange := reflect.DeepEqual(newRTC.Nodes, prevRTC.Nodes) + configDidNotChange := reflect.DeepEqual(newRTC.BFTConfig, prevRTC.BFTConfig) + noChangeDetected := membershipDidNotChange && configDidNotChange + return types.Reconfig{ + InLatestDecision: !noChangeDetected, + CurrentNodes: newRTC.Nodes, + CurrentConfig: newRTC.BFTConfig, + } +} + +func (c *BFTChain) lastPersistedProposalAndSignatures() (*types.Proposal, []types.Signature) { + lastBlock := LastBlockFromLedgerOrPanic(c.support, c.Logger) + // initial report of the last committed block number + c.Metrics.CommittedBlockNumber.Set(float64(lastBlock.Header.Number)) + decision := c.blockToDecision(lastBlock) + return &decision.Proposal, decision.Signatures +} + +func (c *BFTChain) reportIsLeader() { + leaderID := c.consensus.GetLeaderID() + c.Metrics.LeaderID.Set(float64(leaderID)) + + if leaderID == c.Config.SelfID { + c.Metrics.IsLeader.Set(1) + } else { + c.Metrics.IsLeader.Set(0) + } +} + +// StatusReport returns the ConsensusRelation & Status +func (c *BFTChain) StatusReport() (types2.ConsensusRelation, types2.Status) { + c.statusReportMutex.Lock() + defer c.statusReportMutex.Unlock() + + return c.consensusRelation, c.status +} + +func buildVerifier( + cv ConfigValidator, + runtimeConfig *atomic.Value, + support consensus.ConsenterSupport, + requestInspector *RequestInspector, + policyManager policies.Manager, +) *Verifier { + channelDecorator := zap.String("channel", support.ChannelID()) + logger := flogging.MustGetLogger("orderer.consensus.smartbft.verifier").With(channelDecorator) + return &Verifier{ + ConfigValidator: cv, + VerificationSequencer: support, + ReqInspector: requestInspector, + Logger: logger, + RuntimeConfig: runtimeConfig, + ConsenterVerifier: &consenterVerifier{ + logger: logger, + channel: 
support.ChannelID(), + policyManager: policyManager, + }, + + AccessController: &chainACL{ + policyManager: policyManager, + Logger: logger, + }, + Ledger: support, + } +} + +type chainACL struct { + policyManager policies.Manager + Logger *flogging.FabricLogger +} + +// Evaluate evaluates signed data +func (c *chainACL) Evaluate(signatureSet []*protoutil.SignedData) error { + policy, ok := c.policyManager.GetPolicy(policies.ChannelWriters) + if !ok { + return fmt.Errorf("could not find policy %s", policies.ChannelWriters) + } + + err := policy.EvaluateSignedData(signatureSet) + if err != nil { + c.Logger.Debugf("SigFilter evaluation failed: %s, policyName: %s", err.Error(), policies.ChannelWriters) + return errors.Wrap(errors.WithStack(msgprocessor.ErrPermissionDenied), err.Error()) + } + return nil +} diff --git a/orderer/consensus/smartbft/configverifier.go b/orderer/consensus/smartbft/configverifier.go new file mode 100644 index 00000000000..b7baf568b68 --- /dev/null +++ b/orderer/consensus/smartbft/configverifier.go @@ -0,0 +1,179 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/configtx" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +//go:generate mockery -dir . -name Filters -case underscore -output mocks + +// Filters applies the filters on the outer envelope +type Filters interface { + ApplyFilters(channel string, env *cb.Envelope) error +} + +//go:generate mockery -dir . 
//go:generate mockery -dir . -name ChannelConfigTemplator -case underscore -output mocks

// ChannelConfigTemplator returns a channel creation transaction to the system channel
type ChannelConfigTemplator interface {
	NewChannelConfig(env *cb.Envelope) (channelconfig.Resources, error)
}

//go:generate mockery -dir . -name ConfigUpdateProposer -case underscore -output mocks

// ConfigUpdateProposer produces a ConfigEnvelope
type ConfigUpdateProposer interface {
	ProposeConfigUpdate(channel string, configtx *cb.Envelope) (*cb.ConfigEnvelope, error)
}

//go:generate mockery -dir . -name Bundle -case underscore -output mocks

// Bundle defines the channelconfig resources interface
type Bundle interface {
	channelconfig.Resources
}

//go:generate mockery -dir . -name ConfigTxValidator -case underscore -output mocks

// ConfigTxValidator defines the configtx validator interface
type ConfigTxValidator interface {
	configtx.Validator
}

// ConfigBlockValidator validates config transactions before commit by
// re-proposing them and comparing the result to the pending config.
type ConfigBlockValidator struct {
	ChannelConfigTemplator ChannelConfigTemplator // builds configs for channel creation txs
	ConfigUpdateProposer   ConfigUpdateProposer   // re-computes config updates for this channel
	ValidatingChannel      string                 // the channel this validator serves
	Filters                Filters                // outer-envelope filters, applied first
	Logger                 *flogging.FabricLogger
}

// ValidateConfig validates config from envelope
func (cbv *ConfigBlockValidator) ValidateConfig(envelope *cb.Envelope) error {
	payload, err := protoutil.UnmarshalPayload(envelope.Payload)
	if err != nil {
		return err
	}

	if payload.Header == nil {
		return fmt.Errorf("no header was set")
	}

	if payload.Header.ChannelHeader == nil {
		return fmt.Errorf("no channel header was set")
	}

	chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader)
	if err != nil {
		return fmt.Errorf("channel header unmarshalling error: %s", err)
	}

	// Two shapes are accepted: a CONFIG tx carrying a ConfigEnvelope directly,
	// or an ORDERER_TRANSACTION wrapping a CONFIG envelope (channel creation).
	switch chdr.Type {
	case int32(cb.HeaderType_CONFIG):
		configEnvelope := &cb.ConfigEnvelope{}
		if err = proto.Unmarshal(payload.Data, configEnvelope); err != nil {
			return fmt.Errorf("data unmarshalling error: %s", err)
		}
		return cbv.verifyConfigUpdateMsg(envelope, configEnvelope, chdr)

	case int32(cb.HeaderType_ORDERER_TRANSACTION):
		env, err := protoutil.UnmarshalEnvelope(payload.Data)
		if err != nil {
			return fmt.Errorf("data unmarshalling error: %s", err)
		}

		configEnvelope := &cb.ConfigEnvelope{}
		_, err = protoutil.UnmarshalEnvelopeOfType(env, cb.HeaderType_CONFIG, configEnvelope)
		if err != nil {
			return fmt.Errorf("data unmarshalling error: %s", err)
		}
		return cbv.verifyConfigUpdateMsg(envelope, configEnvelope, chdr)

	default:
		return errors.Errorf("unexpected envelope type %s", cb.HeaderType_name[chdr.Type])
	}
}

// verifyConfigUpdateMsg applies the outer filters, re-proposes the config
// update through the appropriate proposer (channel creation vs. same-channel
// update), and requires the result to equal the pending config.
func (cbv *ConfigBlockValidator) verifyConfigUpdateMsg(outEnv *cb.Envelope, confEnv *cb.ConfigEnvelope, chdr *cb.ChannelHeader) error {
	if confEnv == nil || confEnv.LastUpdate == nil || confEnv.Config == nil {
		return errors.New("invalid config envelope")
	}
	envPayload, err := protoutil.UnmarshalPayload(confEnv.LastUpdate.Payload)
	if err != nil {
		return err
	}

	if envPayload.Header == nil {
		return errors.New("inner header is nil")
	}

	if envPayload.Header.ChannelHeader == nil {
		return errors.New("inner channelheader is nil")
	}

	typ := cb.HeaderType(chdr.Type)

	cbv.Logger.Infof("Applying filters for config update of type %s to channel %s", typ, chdr.ChannelId)

	// First apply the filters on the outer envelope, regardless of the type of transaction it is.
	if err := cbv.Filters.ApplyFilters(chdr.ChannelId, outEnv); err != nil {
		return err
	}

	var expectedConfigEnv *cb.ConfigEnvelope
	channelID, err := protoutil.ChannelID(confEnv.LastUpdate)
	if err != nil {
		return errors.Errorf("error extracting channel ID from config update")
	}

	if cbv.ValidatingChannel != channelID {
		if cb.HeaderType(chdr.Type) != cb.HeaderType_ORDERER_TRANSACTION {
			// If we reached here, then it's a Config transaction to the wrong channel, so abort it.
			return errors.Errorf("header type is %s but channel is %s", typ, chdr.ChannelId)
		}

		// Else it's a channel creation transaction to the system channel.
		bundle, err := cbv.ChannelConfigTemplator.NewChannelConfig(confEnv.LastUpdate)
		if err != nil {
			cbv.Logger.Errorf("cannot construct new config from last update: %v", err)
			return err
		}

		expectedConfigEnv, err = bundle.ConfigtxValidator().ProposeConfigUpdate(confEnv.LastUpdate)
		if err != nil {
			cbv.Logger.Errorf("Rejecting config update due to %v", err)
			return err
		}
	} else {
		// A same-channel update must not be wrapped as an orderer transaction.
		if cb.HeaderType(chdr.Type) == cb.HeaderType_ORDERER_TRANSACTION {
			return errors.Errorf("expected config transaction but got orderer transaction")
		}
		expectedConfigEnv, err = cbv.ConfigUpdateProposer.ProposeConfigUpdate(chdr.ChannelId, confEnv.LastUpdate)
		if err != nil {
			cbv.Logger.Errorf("Rejecting config proposal due to %v", err)
			return err
		}
	}

	// Extract the Config from the result of ProposeConfigUpdate, and compare it
	// with the pending config.
	if proto.Equal(confEnv.Config, expectedConfigEnv.Config) {
		return nil
	}
	cbv.Logger.Errorf("Pending Config is %v, but it should be %v", confEnv.Config, expectedConfigEnv.Config)
	return errors.Errorf("pending config does not match calculated expected config")
}
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 */
-name InactiveChainRegistry -case underscore -output mocks + +// InactiveChainRegistry registers chains that are inactive +type InactiveChainRegistry interface { + // TrackChain tracks a chain with the given name, and calls the given callback + // when this chain should be created. + TrackChain(chainName string, genesisBlock *cb.Block, createChain func()) + // Stop stops the InactiveChainRegistry. This is used when removing the + // system channel. + Stop() +} + +// Consenter implementation of the BFT smart based consenter +type Consenter struct { + CreateChain func(chainName string) + InactiveChainRegistry InactiveChainRegistry + GetPolicyManager PolicyManagerRetriever + Logger *flogging.FabricLogger + Cert []byte + Comm *cluster.Comm + Chains ChainGetter + SignerSerializer SignerSerializer + Registrar *multichannel.Registrar + WALBaseDir string + ClusterDialer *cluster.PredicateDialer + Conf *localconfig.TopLevel + Metrics *Metrics + BCCSP bccsp.BCCSP +} + +// New creates Consenter of type smart bft +func New( + icr InactiveChainRegistry, + pmr PolicyManagerRetriever, + signerSerializer SignerSerializer, + clusterDialer *cluster.PredicateDialer, + conf *localconfig.TopLevel, + srvConf comm.ServerConfig, + srv *comm.GRPCServer, + r *multichannel.Registrar, + metricsProvider metrics.Provider, + BCCSP bccsp.BCCSP, +) *Consenter { + logger := flogging.MustGetLogger("orderer.consensus.smartbft") + + metrics := cluster.NewMetrics(metricsProvider) + + var walConfig WALConfig + err := mapstructure.Decode(conf.Consensus, &walConfig) + if err != nil { + logger.Panicf("Failed to decode consensus configuration: %s", err) + } + + logger.Infof("WAL Directory is %s", walConfig.WALDir) + + consenter := &Consenter{ + InactiveChainRegistry: icr, + Registrar: r, + GetPolicyManager: pmr, + Conf: conf, + ClusterDialer: clusterDialer, + Logger: logger, + Cert: srvConf.SecOpts.Certificate, + Chains: r, + SignerSerializer: signerSerializer, + WALBaseDir: walConfig.WALDir, + Metrics: 
NewMetrics(metricsProvider), + CreateChain: r.CreateChain, + BCCSP: BCCSP, + } + + compareCert := cluster.CachePublicKeyComparisons(func(a, b []byte) bool { + err := crypto.CertificatesWithSamePublicKey(a, b) + if err != nil && err != crypto.ErrPubKeyMismatch { + crypto.LogNonPubKeyMismatchErr(logger.Errorf, err, a, b) + } + return err == nil + }) + + consenter.Comm = &cluster.Comm{ + MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval, + CertExpWarningThreshold: conf.General.Cluster.CertExpirationWarningThreshold, + SendBufferSize: conf.General.Cluster.SendBufferSize, + Logger: flogging.MustGetLogger("orderer.common.cluster"), + Chan2Members: make(map[string]cluster.MemberMapping), + Connections: cluster.NewConnectionStore(clusterDialer, metrics.EgressTLSConnectionCount), + Metrics: metrics, + ChanExt: consenter, + H: &Ingress{ + Logger: logger, + ChainSelector: consenter, + }, + CompareCertificate: compareCert, + } + + svc := &cluster.Service{ + CertExpWarningThreshold: conf.General.Cluster.CertExpirationWarningThreshold, + MinimumExpirationWarningInterval: cluster.MinimumExpirationWarningInterval, + StreamCountReporter: &cluster.StreamCountReporter{ + Metrics: metrics, + }, + StepLogger: flogging.MustGetLogger("orderer.common.cluster.step"), + Logger: flogging.MustGetLogger("orderer.common.cluster"), + Dispatcher: consenter.Comm, + } + + ab.RegisterClusterServer(srv.Server(), svc) + + return consenter +} + +// ReceiverByChain returns the MessageReceiver for the given channelID or nil if not found. 
+func (c *Consenter) ReceiverByChain(channelID string) MessageReceiver { + cs := c.Chains.GetChain(channelID) + if cs == nil { + return nil + } + if cs.Chain == nil { + c.Logger.Panicf("Programming error - Chain %s is nil although it exists in the mapping", channelID) + } + if smartBFTChain, isBFTSmart := cs.Chain.(*BFTChain); isBFTSmart { + return smartBFTChain + } + c.Logger.Warningf("Chain %s is of type %v and not smartbft.Chain", channelID, reflect.TypeOf(cs.Chain)) + return nil +} + +// HandleChain returns a new Chain instance or an error upon failure +func (c *Consenter) HandleChain(support consensus.ConsenterSupport, metadata *cb.Metadata) (consensus.Chain, error) { + configOptions := &smartbft.Options{} + consenters := support.SharedConfig().Consenters() + if err := proto.Unmarshal(support.SharedConfig().ConsensusMetadata(), configOptions); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal consensus metadata") + } + + selfID, err := c.detectSelfID(consenters) + if err != nil { + if c.InactiveChainRegistry != nil { + c.Logger.Errorf("channel %s is not serviced by me", support.ChannelID()) + c.InactiveChainRegistry.TrackChain(support.ChannelID(), support.Block(0), func() { + c.CreateChain(support.ChannelID()) + }) + return &inactive.Chain{Err: errors.Errorf("channel %s is not serviced by me", support.ChannelID())}, nil + } + + return nil, errors.Wrap(err, "without a system channel, a follower should have been created") + } + c.Logger.Infof("Local consenter id is %d", selfID) + + puller, err := newBlockPuller(support, c.ClusterDialer, c.Conf.General.Cluster, c.BCCSP) + if err != nil { + c.Logger.Panicf("Failed initializing block puller") + } + + config, err := configFromMetadataOptions((uint64)(selfID), configOptions) + if err != nil { + return nil, errors.Wrap(err, "failed parsing smartbft configuration") + } + c.Logger.Debugf("SmartBFT-Go config: %+v", config) + + configValidator := &ConfigBlockValidator{ + ChannelConfigTemplator: c.Registrar, 
+ ValidatingChannel: support.ChannelID(), + Filters: c.Registrar, + ConfigUpdateProposer: c.Registrar, + Logger: c.Logger, + } + + chain, err := NewChain(configValidator, (uint64)(selfID), config, path.Join(c.WALBaseDir, support.ChannelID()), puller, c.Comm, c.SignerSerializer, c.GetPolicyManager(support.ChannelID()), support, c.Metrics, c.BCCSP) + if err != nil { + return nil, errors.Wrap(err, "failed creating a new BFTChain") + } + + return chain, nil +} + +func (c *Consenter) IsChannelMember(joinBlock *cb.Block) (bool, error) { + if joinBlock == nil { + return false, errors.New("nil block") + } + envelopeConfig, err := protoutil.ExtractEnvelope(joinBlock, 0) + if err != nil { + return false, err + } + bundle, err := channelconfig.NewBundleFromEnvelope(envelopeConfig, c.BCCSP) + if err != nil { + return false, err + } + oc, exists := bundle.OrdererConfig() + if !exists { + return false, errors.New("no orderer config in bundle") + } + configOptions := &smartbft.Options{} + if err := proto.Unmarshal(oc.ConsensusMetadata(), configOptions); err != nil { + return false, err + } + + verifyOpts, err := etcdraft.CreateX509VerifyOptions(oc) + if err != nil { + return false, errors.Wrapf(err, "failed to create x509 verify options from orderer config") + } + + if err := VerifyConfigMetadata(configOptions, verifyOpts); err != nil { + return false, errors.Wrapf(err, "failed to validate config metadata of ordering config") + } + + member := false + for _, consenter := range oc.Consenters() { + if bytes.Equal(c.Cert, consenter.ServerTlsCert) || bytes.Equal(c.Cert, consenter.ClientTlsCert) { + member = true + break + } + } + + return member, nil +} + +// RemoveInactiveChainRegistry stops and removes the inactive chain registry. +// This is used when removing the system channel. 
+func (c *Consenter) RemoveInactiveChainRegistry() { + if c.InactiveChainRegistry == nil { + return + } + c.InactiveChainRegistry.Stop() + c.InactiveChainRegistry = nil +} + +// TargetChannel extracts the channel from the given proto.Message. +// Returns an empty string on failure. +func (c *Consenter) TargetChannel(message proto.Message) string { + switch req := message.(type) { + case *ab.ConsensusRequest: + return req.Channel + case *ab.SubmitRequest: + return req.Channel + default: + return "" + } +} + +func pemToDER(pemBytes []byte, id uint64, certType string, logger *flogging.FabricLogger) ([]byte, error) { + bl, _ := pem.Decode(pemBytes) + if bl == nil { + logger.Errorf("Rejecting PEM block of %s TLS cert for node %d, offending PEM is: %s", certType, id, string(pemBytes)) + return nil, errors.Errorf("invalid PEM block") + } + return bl.Bytes, nil +} + +func (c *Consenter) detectSelfID(consenters []*cb.Consenter) (uint32, error) { + var serverCertificates []string + for _, cst := range consenters { + serverCertificates = append(serverCertificates, string(cst.ServerTlsCert)) + if bytes.Equal(c.Cert, cst.ServerTlsCert) { + return cst.Id, nil + } + } + + c.Logger.Warning("Could not find", string(c.Cert), "among", serverCertificates) + return 0, cluster.ErrNotInChannel +} + +// VerifyConfigMetadata validates SmartBFT config metadata. +// Note: ignores certificates expiration. +func VerifyConfigMetadata(options *smartbft.Options, verifyOpts x509.VerifyOptions) error { + if options == nil { + // defensive check. this should not happen as CheckConfigMetadata + // should always be called with non-nil config metadata + return errors.Errorf("nil SmartBFT config options") + } + + // todo: check metadata + + return nil +} diff --git a/orderer/consensus/smartbft/egress.go b/orderer/consensus/smartbft/egress.go new file mode 100644 index 00000000000..d1dbd079795 --- /dev/null +++ b/orderer/consensus/smartbft/egress.go @@ -0,0 +1,79 @@ +/* +Copyright IBM Corp. 
All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "sync/atomic" + + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + cb "github.com/hyperledger/fabric-protos-go/common" + ab "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/protoutil" +) + +//go:generate mockery -dir . -name RPC -case underscore -output mocks + +// RPC sends a consensus and submits a request +type RPC interface { + SendConsensus(dest uint64, msg *ab.ConsensusRequest) error + // SendSubmit(dest uint64, request *ab.SubmitRequest) error + SendSubmit(destination uint64, request *ab.SubmitRequest, report func(error)) error +} + +// Logger specifies the logger +type Logger interface { + Warnf(template string, args ...interface{}) + Panicf(template string, args ...interface{}) +} + +// Egress implementation +type Egress struct { + Channel string + RPC RPC + Logger Logger + RuntimeConfig *atomic.Value +} + +// Nodes returns nodes from the runtime config +func (e *Egress) Nodes() []uint64 { + nodes := e.RuntimeConfig.Load().(RuntimeConfig).Nodes + var res []uint64 + for _, n := range nodes { + res = append(res, (uint64)(n)) + } + return res +} + +// SendConsensus sends the BFT message to the cluster +func (e *Egress) SendConsensus(targetID uint64, m *protos.Message) { + err := e.RPC.SendConsensus(targetID, bftMsgToClusterMsg(m, e.Channel)) + if err != nil { + e.Logger.Warnf("Failed sending to %d: %v", targetID, err) + } +} + +// SendTransaction sends the transaction to the cluster +func (e *Egress) SendTransaction(targetID uint64, request []byte) { + env := &cb.Envelope{} + err := proto.Unmarshal(request, env) + if err != nil { + e.Logger.Panicf("Failed unmarshaling request %v to envelope: %v", request, err) + } + msg := &ab.SubmitRequest{ + Channel: e.Channel, + Payload: env, + } + e.RPC.SendSubmit(targetID, msg, nil) +} + +func bftMsgToClusterMsg(message *protos.Message, channel 
string) *ab.ConsensusRequest { + return &ab.ConsensusRequest{ + Payload: protoutil.MarshalOrPanic(message), + Channel: channel, + } +} diff --git a/orderer/consensus/smartbft/ingress.go b/orderer/consensus/smartbft/ingress.go new file mode 100644 index 00000000000..b44ca89c702 --- /dev/null +++ b/orderer/consensus/smartbft/ingress.go @@ -0,0 +1,68 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + ab "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +//go:generate mockery -dir . -name MessageReceiver -case underscore -output mocks + +// MessageReceiver receives messages +type MessageReceiver interface { + HandleMessage(sender uint64, m *protos.Message) + HandleRequest(sender uint64, req []byte) +} + +//go:generate mockery -dir . -name ReceiverGetter -case underscore -output mocks + +// ReceiverGetter obtains instances of MessageReceiver given a channel ID +type ReceiverGetter interface { + // ReceiverByChain returns the MessageReceiver if it exists, or nil if it doesn't + ReceiverByChain(channelID string) MessageReceiver +} + +type WarningLogger interface { + Warningf(template string, args ...interface{}) +} + +// Ingress dispatches Submit and Step requests to the designated per chain instances +type Ingress struct { + Logger WarningLogger + ChainSelector ReceiverGetter +} + +// OnConsensus notifies the Ingress for a reception of a StepRequest from a given sender on a given channel +func (in *Ingress) OnConsensus(channel string, sender uint64, request *ab.ConsensusRequest) error { + receiver := in.ChainSelector.ReceiverByChain(channel) + if receiver == nil { + in.Logger.Warningf("An attempt to send a consensus request to a non existing channel (%s) was made by %d", channel, sender) + return errors.Errorf("channel %s doesn't 
exist", channel) + } + msg := &protos.Message{} + if err := proto.Unmarshal(request.Payload, msg); err != nil { + in.Logger.Warningf("Malformed message: %v", err) + return errors.Wrap(err, "malformed message") + } + receiver.HandleMessage(sender, msg) + return nil +} + +// OnSubmit notifies the Ingress for a reception of a SubmitRequest from a given sender on a given channel +func (in *Ingress) OnSubmit(channel string, sender uint64, request *ab.SubmitRequest) error { + receiver := in.ChainSelector.ReceiverByChain(channel) + if receiver == nil { + in.Logger.Warningf("An attempt to submit a transaction to a non existing channel (%s) was made by %d", channel, sender) + return errors.Errorf("channel %s doesn't exist", channel) + } + receiver.HandleRequest(sender, protoutil.MarshalOrPanic(request.Payload)) + return nil +} diff --git a/orderer/consensus/smartbft/metrics.go b/orderer/consensus/smartbft/metrics.go new file mode 100644 index 00000000000..e8578814214 --- /dev/null +++ b/orderer/consensus/smartbft/metrics.go @@ -0,0 +1,62 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import "github.com/hyperledger/fabric/common/metrics" + +var ( + clusterSizeOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "smartbft", + Name: "cluster_size", + Help: "Number of nodes in this channel.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + committedBlockNumberOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "smartbft", + Name: "committed_block_number", + Help: "The number of the latest committed block.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + isLeaderOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "smartbft", + Name: "is_leader", + Help: "The leadership status of the current node according to the latest committed block: 1 if it is the leader else 0.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + leaderIDOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "smartbft", + Name: "leader_id", + Help: "The id of the current leader according to the latest committed block.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } +) + +// Metrics defines the metrics for the cluster. +type Metrics struct { + ClusterSize metrics.Gauge + CommittedBlockNumber metrics.Gauge + IsLeader metrics.Gauge + LeaderID metrics.Gauge +} + +// NewMetrics creates the Metrics +func NewMetrics(p metrics.Provider) *Metrics { + return &Metrics{ + ClusterSize: p.NewGauge(clusterSizeOpts), + CommittedBlockNumber: p.NewGauge(committedBlockNumberOpts), + IsLeader: p.NewGauge(isLeaderOpts), + LeaderID: p.NewGauge(leaderIDOpts), + } +} diff --git a/orderer/consensus/smartbft/signature.go b/orderer/consensus/smartbft/signature.go new file mode 100644 index 00000000000..dcc5de46e1d --- /dev/null +++ b/orderer/consensus/smartbft/signature.go @@ -0,0 +1,97 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "encoding/asn1" + "math/big" + + "github.com/SmartBFT-Go/consensus/pkg/types" + "github.com/golang/protobuf/proto" + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric/common/util" + "github.com/pkg/errors" +) + +// Signature implementation +type Signature struct { + Nonce []byte + SignatureHeader []byte + BlockHeader []byte + OrdererBlockMetadata []byte + AuxiliaryInput []byte +} + +// Unmarshal the signature +func (sig *Signature) Unmarshal(bytes []byte) error { + _, err := asn1.Unmarshal(bytes, sig) + return err +} + +// Marshal the signature +func (sig *Signature) Marshal() []byte { + bytes, err := asn1.Marshal(*sig) + if err != nil { + panic(err) + } + return bytes +} + +// AsBytes returns the message to sign +func (sig Signature) AsBytes() []byte { + msg2Sign := util.ConcatenateBytes(sig.OrdererBlockMetadata, sig.SignatureHeader, sig.BlockHeader, sig.AuxiliaryInput) + return msg2Sign +} + +// ProposalToBlock marshals the proposal the the block +func ProposalToBlock(proposal types.Proposal) (*cb.Block, error) { + // initialize block with empty fields + block := &cb.Block{ + Data: &cb.BlockData{}, + Metadata: &cb.BlockMetadata{}, + } + + if len(proposal.Header) == 0 { + return nil, errors.New("proposal header cannot be nil") + } + + hdr := &asn1Header{} + + if _, err := asn1.Unmarshal(proposal.Header, hdr); err != nil { + return nil, errors.Wrap(err, "bad header") + } + + block.Header = &cb.BlockHeader{ + Number: hdr.Number.Uint64(), + PreviousHash: hdr.PreviousHash, + DataHash: hdr.DataHash, + } + + if len(proposal.Payload) == 0 { + return nil, errors.New("proposal payload cannot be nil") + } + + tuple := &ByteBufferTuple{} + if err := tuple.FromBytes(proposal.Payload); err != nil { + return nil, errors.Wrap(err, "bad payload and metadata tuple") + } + + if err := proto.Unmarshal(tuple.A, block.Data); err != nil { + return nil, errors.Wrap(err, "bad 
// SignProposal signs the proposal together with the given auxiliary input.
// It converts the proposal to a block (panicking on a malformed proposal),
// assembles a Signature over the block header, a freshly created signature
// header, the orderer block metadata, and the auxiliary input, and signs
// the concatenation of those parts.
func (s *Signer) SignProposal(proposal types.Proposal, auxiliaryInput []byte) *types.Signature {
	block, err := ProposalToBlock(proposal)
	if err != nil {
		s.Logger.Panicf("Tried to sign bad proposal: %v", err)
	}

	nonce := randomNonceOrPanic()

	sig := Signature{
		AuxiliaryInput: auxiliaryInput,
		Nonce:          nonce,
		BlockHeader:    protoutil.BlockHeaderBytes(block.Header),
		SignatureHeader: protoutil.MarshalOrPanic(s.newSignatureHeaderOrPanic(nonce)),
		OrdererBlockMetadata: protoutil.MarshalOrPanic(&cb.OrdererBlockMetadata{
			LastConfig:        &cb.LastConfig{Index: uint64(s.LastConfigBlockNum(block))},
			ConsenterMetadata: proposal.Metadata,
		}),
	}

	// The signed message covers the signature header (see Signature.AsBytes).
	signature := protoutil.SignOrPanic(s.SignerSerializer, sig.AsBytes())

	// Nil out the signature header after creating the signature, so the
	// marshaled Msg below does not carry it; presumably verifiers
	// reconstruct it — TODO confirm against the verification side.
	sig.SignatureHeader = nil

	return &types.Signature{
		ID:    s.ID,
		Value: signature,
		Msg:   sig.Marshal(),
	}
}
// Sync synchronizes blocks and returns the response.
// It first attempts to pull blocks from remote cluster members; if that
// fails, it falls back to the latest block in the local ledger and reports
// no reconfiguration (reading from the local ledger requires none).
func (s *Synchronizer) Sync() types.SyncResponse {
	decision, err := s.synchronize()
	if err != nil {
		s.Logger.Warnf("Could not synchronize with remote peers due to %s, returning state from local ledger", err)
		// Fallback: answer from the tip of the local ledger.
		block := s.Support.Block(s.Support.Height() - 1)
		config, nodes := s.LatestConfig()
		return types.SyncResponse{
			Latest: *s.BlockToDecision(block),
			Reconfig: types.ReconfigSync{
				InReplicatedDecisions: false, // If we read from ledger we do not need to reconfigure.
				CurrentNodes:          nodes,
				CurrentConfig:         config,
			},
		}
	}

	// After sync has ended, reset the state of the last reconfig.
	// (lastReconfig was accumulated block-by-block inside synchronize.)
	defer func() {
		s.lastReconfig = types.Reconfig{}
	}()
	return types.SyncResponse{
		Latest: *decision,
		Reconfig: types.ReconfigSync{
			InReplicatedDecisions: s.lastReconfig.InLatestDecision,
			CurrentConfig:         s.lastReconfig.CurrentConfig,
			CurrentNodes:          s.lastReconfig.CurrentNodes,
		},
	}
}
+ s.lastReconfig.InLatestDecision = s.lastReconfig.InLatestDecision || prevInLatestDecision + seq++ + blocksFetched++ + } + + if lastPulledBlock == nil { + return nil, errors.Errorf("failed pulling block %d", seq) + } + + startSeq := startHeight + s.Logger.Infof("Finished synchronizing with cluster, fetched %d blocks, starting from block [%d], up until and including block [%d]", + blocksFetched, startSeq, lastPulledBlock.Header.Number) + + viewMetadata, lastConfigSqn := s.getViewMetadataLastConfigSqnFromBlock(lastPulledBlock) + + s.Logger.Infof("Returning view metadata of %v, lastConfigSeq %d", viewMetadata, lastConfigSqn) + return s.BlockToDecision(lastPulledBlock), nil +} + +// computeTargetHeight compute the target height to synchronize to. +// +// heights: a slice containing the heights of accessible peers, length must be >0. +// clusterSize: the cluster size, must be >0. +func (s *Synchronizer) computeTargetHeight(heights []uint64) uint64 { + sort.Slice(heights, func(i, j int) bool { return heights[i] > heights[j] }) // Descending + f := uint64(s.ClusterSize-1) / 3 // The number of tolerated byzantine faults + lenH := uint64(len(heights)) + + s.Logger.Debugf("Heights: %v", heights) + + if lenH < f+1 { + s.Logger.Debugf("Returning %d", heights[0]) + return heights[int(lenH)-1] + } + s.Logger.Debugf("Returning %d", heights[f]) + return heights[f] +} diff --git a/orderer/consensus/smartbft/util.go b/orderer/consensus/smartbft/util.go new file mode 100644 index 00000000000..744b2ca2c5e --- /dev/null +++ b/orderer/consensus/smartbft/util.go @@ -0,0 +1,455 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "bytes" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "fmt" + "sort" + "time" + + "github.com/SmartBFT-Go/consensus/pkg/types" + "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric-protos-go/orderer/smartbft" + "github.com/hyperledger/fabric/bccsp" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/crypto" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/orderer/common/cluster" + "github.com/hyperledger/fabric/orderer/common/localconfig" + "github.com/hyperledger/fabric/orderer/consensus" + "github.com/hyperledger/fabric/orderer/consensus/etcdraft" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +// RuntimeConfig defines the configuration of the consensus +// that is related to runtime. 
// BlockCommitted updates the config from the block.
// The receiver is a value, so a new RuntimeConfig is returned rather than
// mutating the existing one. If the block parses as a config block
// (cluster.ConfigFromBlock succeeds), the full config-block path is taken;
// otherwise only the last-block bookkeeping fields are refreshed.
func (rtc RuntimeConfig) BlockCommitted(block *cb.Block, bccsp bccsp.BCCSP) (RuntimeConfig, error) {
	if _, err := cluster.ConfigFromBlock(block); err == nil {
		return rtc.configBlockCommitted(block, bccsp)
	}
	// Non-config block: carry over the existing configuration, update only
	// the last committed block and its hash.
	return RuntimeConfig{
		BFTConfig:              rtc.BFTConfig,
		id:                     rtc.id,
		logger:                 rtc.logger,
		LastCommittedBlockHash: hex.EncodeToString(protoutil.BlockHeaderHash(block.Header)),
		Nodes:                  rtc.Nodes,
		ID2Identities:          rtc.ID2Identities,
		RemoteNodes:            rtc.RemoteNodes,
		LastBlock:              block,
		LastConfigBlock:        rtc.LastConfigBlock,
	}, nil
}
+ return types.Configuration{}, err + } + bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp) + if err != nil { + return types.Configuration{}, err + } + + oc, ok := bundle.OrdererConfig() + if !ok { + return types.Configuration{}, errors.New("no orderer config") + } + + consensusConfigOptions := &smartbft.Options{} + if err := proto.Unmarshal(oc.ConsensusMetadata(), consensusConfigOptions); err != nil { + return types.Configuration{}, err + } + + return configFromMetadataOptions(selfID, consensusConfigOptions) +} + +//go:generate counterfeiter -o mocks/mock_blockpuller.go . BlockPuller + +// newBlockPuller creates a new block puller +func newBlockPuller( + support consensus.ConsenterSupport, + baseDialer *cluster.PredicateDialer, + clusterConfig localconfig.Cluster, + bccsp bccsp.BCCSP) (BlockPuller, error) { + verifyBlockSequence := func(blocks []*cb.Block, _ string) error { + return cluster.VerifyBlocksBFT(blocks, support) + } + + stdDialer := &cluster.StandardDialer{ + Config: baseDialer.Config.Clone(), + } + stdDialer.Config.AsyncConnect = false + stdDialer.Config.SecOpts.VerifyCertificate = nil + + // Extract the TLS CA certs and endpoints from the configuration, + endpoints, err := etcdraft.EndpointconfigFromSupport(support, bccsp) + if err != nil { + return nil, err + } + + der, _ := pem.Decode(stdDialer.Config.SecOpts.Certificate) + if der == nil { + return nil, errors.Errorf("client certificate isn't in PEM format: %v", + string(stdDialer.Config.SecOpts.Certificate)) + } + + bp := &cluster.BlockPuller{ + VerifyBlockSequence: verifyBlockSequence, + Logger: flogging.MustGetLogger("orderer.common.cluster.puller"), + RetryTimeout: clusterConfig.ReplicationRetryTimeout, + MaxTotalBufferBytes: clusterConfig.ReplicationBufferSize, + FetchTimeout: clusterConfig.ReplicationPullTimeout, + Endpoints: endpoints, + Signer: support, + TLSCert: der.Bytes, + Channel: support.ChannelID(), + Dialer: stdDialer, + } + + return bp, nil +} + +func 
getViewMetadataFromBlock(block *cb.Block) (*smartbftprotos.ViewMetadata, error) { + if block.Header.Number == 0 { + // Genesis block has no prior metadata so we just return an un-initialized metadata + return new(smartbftprotos.ViewMetadata), nil + } + + signatureMetadata := protoutil.GetMetadataFromBlockOrPanic(block, cb.BlockMetadataIndex_SIGNATURES) + ordererMD := &cb.OrdererBlockMetadata{} + if err := proto.Unmarshal(signatureMetadata.Value, ordererMD); err != nil { + return nil, errors.Wrap(err, "failed unmarshaling OrdererBlockMetadata") + } + + var viewMetadata smartbftprotos.ViewMetadata + if err := proto.Unmarshal(ordererMD.ConsenterMetadata, &viewMetadata); err != nil { + return nil, err + } + + return &viewMetadata, nil +} + +func configFromMetadataOptions(selfID uint64, options *smartbft.Options) (types.Configuration, error) { + var err error + + config := types.DefaultConfig + config.SelfID = (uint64)(selfID) + + if options == nil { + return config, errors.New("config metadata options field is nil") + } + + config.RequestBatchMaxCount = options.RequestBatchMaxCount + config.RequestBatchMaxBytes = options.RequestBatchMaxBytes + if config.RequestBatchMaxInterval, err = time.ParseDuration(options.RequestBatchMaxInterval); err != nil { + return config, errors.Wrap(err, "bad config metadata option RequestBatchMaxInterval") + } + config.IncomingMessageBufferSize = options.IncomingMessageBufferSize + config.RequestPoolSize = options.RequestPoolSize + if config.RequestForwardTimeout, err = time.ParseDuration(options.RequestForwardTimeout); err != nil { + return config, errors.Wrap(err, "bad config metadata option RequestForwardTimeout") + } + if config.RequestComplainTimeout, err = time.ParseDuration(options.RequestComplainTimeout); err != nil { + return config, errors.Wrap(err, "bad config metadata option RequestComplainTimeout") + } + if config.RequestAutoRemoveTimeout, err = time.ParseDuration(options.RequestAutoRemoveTimeout); err != nil { + return config, 
errors.Wrap(err, "bad config metadata option RequestAutoRemoveTimeout") + } + if config.ViewChangeResendInterval, err = time.ParseDuration(options.ViewChangeResendInterval); err != nil { + return config, errors.Wrap(err, "bad config metadata option ViewChangeResendInterval") + } + if config.ViewChangeTimeout, err = time.ParseDuration(options.ViewChangeTimeout); err != nil { + return config, errors.Wrap(err, "bad config metadata option ViewChangeTimeout") + } + if config.LeaderHeartbeatTimeout, err = time.ParseDuration(options.LeaderHeartbeatTimeout); err != nil { + return config, errors.Wrap(err, "bad config metadata option LeaderHeartbeatTimeout") + } + config.LeaderHeartbeatCount = options.LeaderHeartbeatCount + if config.CollectTimeout, err = time.ParseDuration(options.CollectTimeout); err != nil { + return config, errors.Wrap(err, "bad config metadata option CollectTimeout") + } + config.SyncOnStart = options.SyncOnStart + config.SpeedUpViewChange = options.SpeedUpViewChange + + if options.DecisionsPerLeader == 0 { + config.DecisionsPerLeader = 1 + } + + // Enable rotation by default, but optionally disable it + switch options.LeaderRotation { + case smartbft.Options_ROTATION_OFF: + config.LeaderRotation = false + config.DecisionsPerLeader = 0 + default: + config.LeaderRotation = true + } + + if err = config.Validate(); err != nil { + return config, errors.Wrap(err, "config validation failed") + } + + return config, nil +} + +type request struct { + sigHdr *cb.SignatureHeader + envelope *cb.Envelope + chHdr *cb.ChannelHeader +} + +// RequestInspector inspects incomming requests and validates serialized identity +type RequestInspector struct { + ValidateIdentityStructure func(identity *msp.SerializedIdentity) error +} + +func (ri *RequestInspector) requestIDFromSigHeader(sigHdr *cb.SignatureHeader) (types.RequestInfo, error) { + sID := &msp.SerializedIdentity{} + if err := proto.Unmarshal(sigHdr.Creator, sID); err != nil { + return types.RequestInfo{}, 
errors.Wrap(err, "identity isn't an MSP Identity")
	}

	if err := ri.ValidateIdentityStructure(sID); err != nil {
		return types.RequestInfo{}, err
	}

	// Transaction ID is a hash over nonce||creator; client ID over creator alone.
	var preimage []byte
	preimage = append(preimage, sigHdr.Nonce...)
	preimage = append(preimage, sigHdr.Creator...)
	txID := sha256.Sum256(preimage)
	clientID := sha256.Sum256(sigHdr.Creator)
	return types.RequestInfo{
		ID:       hex.EncodeToString(txID[:]),
		ClientID: hex.EncodeToString(clientID[:]),
	}, nil
}

// RequestID unwraps the request info from the raw request
func (ri *RequestInspector) RequestID(rawReq []byte) types.RequestInfo {
	req, err := ri.unwrapReq(rawReq)
	if err != nil {
		return types.RequestInfo{}
	}
	reqInfo, err := ri.requestIDFromSigHeader(req.sigHdr)
	if err != nil {
		return types.RequestInfo{}
	}
	return reqInfo
}

// unwrapReq deserializes a raw envelope and extracts its signature header and
// channel header; errors out if either header is missing or malformed.
func (ri *RequestInspector) unwrapReq(req []byte) (*request, error) {
	envelope, err := protoutil.UnmarshalEnvelope(req)
	if err != nil {
		return nil, err
	}
	payload := &cb.Payload{}
	if err := proto.Unmarshal(envelope.Payload, payload); err != nil {
		return nil, errors.Wrap(err, "failed unmarshaling payload")
	}

	if payload.Header == nil {
		return nil, errors.Errorf("no header in payload")
	}

	sigHdr := &cb.SignatureHeader{}
	if err := proto.Unmarshal(payload.Header.SignatureHeader, sigHdr); err != nil {
		return nil, err
	}

	if len(payload.Header.ChannelHeader) == 0 {
		return nil, errors.New("no channel header in payload")
	}

	chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader)
	if err != nil {
		return nil, errors.WithMessage(err, "error unmarshaling channel header")
	}

	return &request{
		chHdr:    chdr,
		sigHdr:   sigHdr,
		envelope: envelope,
	}, nil
}

// RemoteNodesFromConfigBlock unmarshals the node config from the block metadata
func RemoteNodesFromConfigBlock(block *cb.Block, selfID uint64, logger *flogging.FabricLogger, bccsp bccsp.BCCSP) (*nodeConfig, error) {
	env := &cb.Envelope{}
	if err := proto.Unmarshal(block.Data.Data[0], env); err != nil {
		return nil, errors.Wrap(err, "failed unmarshaling envelope of config block")
	}
	bundle, err := channelconfig.NewBundleFromEnvelope(env, bccsp)
	if err != nil {
		return nil, errors.Wrap(err, "failed getting a new bundle from envelope of config block")
	}

	oc, ok := bundle.OrdererConfig()
	if !ok {
		return nil, errors.New("no orderer config in config block")
	}

	// Unmarshaled to ensure the consensus metadata is well-formed; the options
	// themselves are consumed elsewhere (configFromMetadataOptions).
	configOptions := &smartbft.Options{}
	if err := proto.Unmarshal(oc.ConsensusMetadata(), configOptions); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal consensus metadata")
	}

	var nodeIDs []uint64
	var remoteNodes []cluster.RemoteNode
	id2Identities := NodeIdentitiesByID{}
	for _, consenter := range oc.Consenters() {
		// Normalize the signature on the identity cert (low-S form) so that
		// identity comparison across nodes is stable.
		sanitizedID, err := crypto.SanitizeIdentity(consenter.Identity)
		if err != nil {
			logger.Panicf("Failed to sanitize identity: %v", err)
		}
		id2Identities[uint64(consenter.Id)] = sanitizedID
		logger.Infof("%s %d ---> %s", bundle.ConfigtxValidator().ChannelID(), consenter.Id, string(consenter.Identity))

		nodeIDs = append(nodeIDs, uint64(consenter.Id))

		// No need to know yourself
		if selfID == uint64(consenter.Id) {
			continue
		}
		serverCertAsDER, err := pemToDER(consenter.ServerTlsCert, uint64(consenter.Id), "server", logger)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		clientCertAsDER, err := pemToDER(consenter.ClientTlsCert, uint64(consenter.Id), "client", logger)
		if err != nil {
			return nil, errors.WithStack(err)
		}

		// Validate certificate structure
		for _, cert := range [][]byte{serverCertAsDER, clientCertAsDER} {
			if _, err := x509.ParseCertificate(cert); err != nil {
				pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert})
				logger.Errorf("Invalid certificate: %s", string(pemBytes))
				return nil, err
			}
		}

		remoteNodes = append(remoteNodes, cluster.RemoteNode{
			NodeAddress: cluster.NodeAddress{
				ID:       uint64(consenter.Id),
				Endpoint: fmt.Sprintf("%s:%d", consenter.Host, consenter.Port),
			},
			NodeCerts: cluster.NodeCerts{
				ClientTLSCert: clientCertAsDER,
				ServerTLSCert: serverCertAsDER,
			},
		})
	}

	sort.Slice(nodeIDs, func(i, j int) bool {
		return nodeIDs[i] < nodeIDs[j]
	})

	return &nodeConfig{
		remoteNodes:   remoteNodes,
		id2Identities: id2Identities,
		nodeIDs:       nodeIDs,
	}, nil
}

// nodeConfig is the cluster membership derived from a config block.
type nodeConfig struct {
	id2Identities NodeIdentitiesByID
	remoteNodes   []cluster.RemoteNode
	nodeIDs       []uint64
}

// ConsenterCertificate denotes a TLS certificate of a consenter
type ConsenterCertificate struct {
	ConsenterCertificate []byte
	CryptoProvider       bccsp.BCCSP
}

// IsConsenterOfChannel returns whether the caller is a consenter of a channel
// by inspecting the given configuration block.
// It returns nil if true, else returns an error.
func (conCert ConsenterCertificate) IsConsenterOfChannel(configBlock *cb.Block) error {
	if configBlock == nil {
		return errors.New("nil block")
	}
	envelopeConfig, err := protoutil.ExtractEnvelope(configBlock, 0)
	if err != nil {
		return err
	}
	bundle, err := channelconfig.NewBundleFromEnvelope(envelopeConfig, conCert.CryptoProvider)
	if err != nil {
		return err
	}
	oc, exists := bundle.OrdererConfig()
	if !exists {
		return errors.New("no orderer config in bundle")
	}
	if oc.ConsensusType() != "smartbft" {
		return errors.New("not a SmartBFT config block")
	}

	// BUG FIX: removed a leftover debug fmt.Println that dumped every
	// consenter's server TLS cert to stdout on each membership check.
	for _, consenter := range oc.Consenters() {
		if bytes.Equal(conCert.ConsenterCertificate, consenter.ServerTlsCert) || bytes.Equal(conCert.ConsenterCertificate, consenter.ClientTlsCert) {
			return nil
		}
	}
	return cluster.ErrNotInChannel
}
diff --git a/orderer/consensus/smartbft/verifier.go b/orderer/consensus/smartbft/verifier.go
new file mode 100644
index 00000000000..317564f8412 --- /dev/null +++ b/orderer/consensus/smartbft/verifier.go @@ -0,0 +1,448 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package smartbft + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "sync" + "sync/atomic" + + "github.com/SmartBFT-Go/consensus/pkg/types" + "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + cb "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/common/policies" + "github.com/hyperledger/fabric/common/util" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +//go:generate mockery -dir . -name Sequencer -case underscore -output mocks + +// Sequencer returns sequences +type Sequencer interface { + Sequence() uint64 +} + +//go:generate mockery -dir . -name ConsenterVerifier -case underscore -output mocks + +// ConsenterVerifier is used to determine whether a signature from one of the consenters is valid +type ConsenterVerifier interface { + // Evaluate takes a set of SignedData and evaluates whether this set of signatures satisfies the policy + Evaluate(signatureSet []*protoutil.SignedData) error +} + +//go:generate mockery -dir . 
-name AccessController -case underscore -output mocks + +// AccessController is used to determine if a signature of a certain client is valid +type AccessController interface { + // Evaluate takes a set of SignedData and evaluates whether this set of signatures satisfies the policy + Evaluate(signatureSet []*protoutil.SignedData) error +} + +type requestVerifier func(req []byte, isolated bool) (types.RequestInfo, error) + +// NodeIdentitiesByID stores Identities by id +type NodeIdentitiesByID map[uint64][]byte + +// IdentityToID looks up the Identity in NodeIdentitiesByID and returns id and flag true if found +func (nibd NodeIdentitiesByID) IdentityToID(identity []byte) (uint64, bool) { + sID := &msp.SerializedIdentity{} + if err := proto.Unmarshal(identity, sID); err != nil { + return 0, false + } + for id, currIdentity := range nibd { + currentID := &msp.SerializedIdentity{} + if err := proto.Unmarshal(currIdentity, currentID); err != nil { + return 0, false + } + if proto.Equal(currentID, sID) { + return id, true + } + } + return 0, false +} + +// Verifier verifies proposals and signatures +type Verifier struct { + RuntimeConfig *atomic.Value + ReqInspector *RequestInspector + ConsenterVerifier ConsenterVerifier + AccessController AccessController + VerificationSequencer Sequencer + Ledger Ledger + Logger *flogging.FabricLogger + ConfigValidator ConfigValidator +} + +// AuxiliaryData unmarshals and returns auxiliary data from signature +func (v *Verifier) AuxiliaryData(msg []byte) []byte { + sig := &Signature{} + if err := sig.Unmarshal(msg); err != nil { + v.Logger.Warnf("Failed unmarshalling signature message %s: %v", hex.EncodeToString(msg), err) + } + return sig.AuxiliaryInput +} + +// VerifyProposal verifies proposal and returns []RequestInfo +func (v *Verifier) VerifyProposal(proposal types.Proposal) ([]types.RequestInfo, error) { + block, err := ProposalToBlock(proposal) + if err != nil { + return nil, err + } + + rtc := 
v.RuntimeConfig.Load().(RuntimeConfig) + if err := verifyHashChain(block, rtc.LastCommittedBlockHash); err != nil { + return nil, err + } + + requests, err := v.verifyBlockDataAndMetadata(block, proposal.Metadata) + if err != nil { + return nil, err + } + + verificationSeq := v.VerificationSequence() + if verificationSeq != uint64(proposal.VerificationSequence) { + return nil, errors.Errorf("expected verification sequence %d, but proposal has %d", verificationSeq, proposal.VerificationSequence) + } + + return requests, nil +} + +// RequestsFromProposal converts proposal to []RequestInfo +func (v *Verifier) RequestsFromProposal(proposal types.Proposal) []types.RequestInfo { + block, err := ProposalToBlock(proposal) + if err != nil { + return []types.RequestInfo{} + } + + if block.Data == nil { + return []types.RequestInfo{} + } + + var res []types.RequestInfo + for _, txn := range block.Data.Data { + req := v.ReqInspector.RequestID(txn) + res = append(res, req) + } + + return res +} + +// VerifySignature verifies signature +func (v *Verifier) VerifySignature(signature types.Signature) error { + id2Identity := v.RuntimeConfig.Load().(RuntimeConfig).ID2Identities + identity, exists := id2Identity[signature.ID] + if !exists { + return errors.Errorf("node with id of %d doesn't exist", signature.ID) + } + + return v.AccessController.Evaluate([]*protoutil.SignedData{ + {Identity: identity, Data: signature.Msg, Signature: signature.Value}, + }) +} + +// VerifyRequest verifies raw request +func (v *Verifier) VerifyRequest(rawRequest []byte) (types.RequestInfo, error) { + return v.verifyRequest(rawRequest, false) +} + +func (v *Verifier) verifyRequest(rawRequest []byte, noConfigAllowed bool) (types.RequestInfo, error) { + req, err := v.ReqInspector.unwrapReq(rawRequest) + if err != nil { + return types.RequestInfo{}, err + } + + err = v.AccessController.Evaluate([]*protoutil.SignedData{ + {Identity: req.sigHdr.Creator, Data: req.envelope.Payload, Signature: 
req.envelope.Signature}, + }) + + if err != nil { + return types.RequestInfo{}, errors.Wrap(err, "access denied") + } + + if noConfigAllowed && req.chHdr.Type != int32(cb.HeaderType_ENDORSER_TRANSACTION) { + return types.RequestInfo{}, errors.Errorf("only endorser transactions can be sent with other transactions") + } + + switch req.chHdr.Type { + case int32(cb.HeaderType_CONFIG): + case int32(cb.HeaderType_ORDERER_TRANSACTION): + case int32(cb.HeaderType_ENDORSER_TRANSACTION): + default: + return types.RequestInfo{}, errors.Errorf("transaction of type %s is not allowed to be included in blocks", cb.HeaderType_name[req.chHdr.Type]) + } + + if req.chHdr.Type == int32(cb.HeaderType_CONFIG) || req.chHdr.Type == int32(cb.HeaderType_ORDERER_TRANSACTION) { + err := v.ConfigValidator.ValidateConfig(req.envelope) + if err != nil { + v.Logger.Errorf("Error verifying config update: %v", err) + return types.RequestInfo{}, err + } + } + + return v.ReqInspector.requestIDFromSigHeader(req.sigHdr) +} + +// VerifyConsenterSig verifies consenter signature +func (v *Verifier) VerifyConsenterSig(signature types.Signature, prop types.Proposal) ([]byte, error) { + id2Identity := v.RuntimeConfig.Load().(RuntimeConfig).ID2Identities + + identity, exists := id2Identity[signature.ID] + if !exists { + return nil, errors.Errorf("node with id of %d doesn't exist", signature.ID) + } + + sig := &Signature{} + if err := sig.Unmarshal(signature.Msg); err != nil { + v.Logger.Errorf("Failed unmarshaling signature from %d: %v", signature.ID, err) + v.Logger.Errorf("Offending signature Msg: %s", base64.StdEncoding.EncodeToString(signature.Msg)) + v.Logger.Errorf("Offending signature Value: %s", base64.StdEncoding.EncodeToString(signature.Value)) + return nil, errors.Wrap(err, "malformed signature format") + } + + // Reconstruct the signature header + sig.SignatureHeader = protoutil.MarshalOrPanic(&cb.SignatureHeader{ + Nonce: sig.Nonce, + Creator: identity, + }) + + if err := 
v.verifySignatureIsBoundToProposal(sig, identity, prop); err != nil { + return nil, err + } + + expectedMsgToBeSigned := util.ConcatenateBytes(sig.OrdererBlockMetadata, sig.SignatureHeader, sig.BlockHeader, sig.AuxiliaryInput) + signedData := &protoutil.SignedData{ + Signature: signature.Value, + Data: expectedMsgToBeSigned, + Identity: identity, + } + + return sig.AuxiliaryInput, v.ConsenterVerifier.Evaluate([]*protoutil.SignedData{signedData}) +} + +// VerificationSequence returns verification sequence +func (v *Verifier) VerificationSequence() uint64 { + return v.VerificationSequencer.Sequence() +} + +func verifyHashChain(block *cb.Block, prevHeaderHash string) error { + thisHdrHashOfPrevHdr := hex.EncodeToString(block.Header.PreviousHash) + if prevHeaderHash != thisHdrHashOfPrevHdr { + return errors.Errorf("previous header hash is %s but expected %s", thisHdrHashOfPrevHdr, prevHeaderHash) + } + + dataHash := hex.EncodeToString(block.Header.DataHash) + actualHashOfData := hex.EncodeToString(protoutil.BlockDataHash(block.Data)) + if dataHash != actualHashOfData { + return errors.Errorf("data hash is %s but expected %s", dataHash, actualHashOfData) + } + return nil +} + +func (v *Verifier) verifyBlockDataAndMetadata(block *cb.Block, metadata []byte) ([]types.RequestInfo, error) { + if block.Data == nil || len(block.Data.Data) == 0 { + return nil, errors.New("empty block data") + } + + if block.Metadata == nil || len(block.Metadata.Metadata) < len(cb.BlockMetadataIndex_name) { + return nil, errors.New("block metadata is either missing or contains too few entries") + } + + signatureMetadata, err := protoutil.GetMetadataFromBlock(block, cb.BlockMetadataIndex_SIGNATURES) + if err != nil { + return nil, err + } + ordererMetadataFromSignature := &cb.OrdererBlockMetadata{} + if err := proto.Unmarshal(signatureMetadata.Value, ordererMetadataFromSignature); err != nil { + return nil, errors.Wrap(err, "failed unmarshaling OrdererBlockMetadata") + } + + // Ensure the view 
metadata in the block signature and in the proposal are the same

	metadataInBlock := &smartbftprotos.ViewMetadata{}
	if err := proto.Unmarshal(ordererMetadataFromSignature.ConsenterMetadata, metadataInBlock); err != nil {
		return nil, errors.Wrap(err, "failed unmarshaling smartbft metadata from block")
	}

	metadataFromProposal := &smartbftprotos.ViewMetadata{}
	if err := proto.Unmarshal(metadata, metadataFromProposal); err != nil {
		return nil, errors.Wrap(err, "failed unmarshaling smartbft metadata from proposal")
	}

	if !proto.Equal(metadataInBlock, metadataFromProposal) {
		return nil, errors.Errorf("expected metadata in block to be %v but got %v", metadataFromProposal, metadataInBlock)
	}

	rtc := v.RuntimeConfig.Load().(RuntimeConfig)
	lastConfig := rtc.LastConfigBlock.Header.Number

	if protoutil.IsConfigBlock(block) {
		lastConfig = block.Header.Number
	}

	// Verify last config
	if ordererMetadataFromSignature.LastConfig == nil {
		return nil, errors.Errorf("last config is nil")
	}

	if ordererMetadataFromSignature.LastConfig.Index != lastConfig {
		return nil, errors.Errorf("last config in block orderer metadata points to %d but our persisted last config is %d", ordererMetadataFromSignature.LastConfig.Index, lastConfig)
	}

	rawLastConfig, err := protoutil.GetMetadataFromBlock(block, cb.BlockMetadataIndex_LAST_CONFIG)
	if err != nil {
		return nil, err
	}
	lastConf := &cb.LastConfig{}
	if err := proto.Unmarshal(rawLastConfig.Value, lastConf); err != nil {
		return nil, err
	}
	if lastConf.Index != lastConfig {
		// BUG FIX: report the index that was actually compared (lastConf.Index);
		// the previous message printed the signature metadata's index instead.
		return nil, errors.Errorf("last config in block metadata points to %d but our persisted last config is %d", lastConf.Index, lastConfig)
	}

	return validateTransactions(block.Data.Data, v.verifyRequest)
}

// validateTransactions verifies all transactions of a block concurrently and
// returns their request infos in block order. If any transaction fails
// verification, the error of the lowest-indexed failing transaction is
// returned (deterministic, unlike the previous map-iteration-order pick).
func validateTransactions(blockData [][]byte, verifyReq requestVerifier) ([]types.RequestInfo, error) {
	var validationFinished sync.WaitGroup
	validationFinished.Add(len(blockData))

	type txnValidation struct {
		indexInBlock  int
		extractedInfo types.RequestInfo
		validationErr error
	}

	// A config transaction must travel alone in a block.
	noConfigAllowed := len(blockData) > 1

	validations := make(chan txnValidation, len(blockData))
	for i, payload := range blockData {
		go func(indexInBlock int, payload []byte) {
			defer validationFinished.Done()
			reqInfo, err := verifyReq(payload, noConfigAllowed)
			validations <- txnValidation{
				indexInBlock:  indexInBlock,
				extractedInfo: reqInfo,
				validationErr: err,
			}
		}(i, payload)
	}

	validationFinished.Wait()
	close(validations)

	indexToRequestInfo := make(map[int]types.RequestInfo)
	indexToErr := make(map[int]error)
	for validationResult := range validations {
		indexToRequestInfo[validationResult.indexInBlock] = validationResult.extractedInfo
		indexToErr[validationResult.indexInBlock] = validationResult.validationErr
	}

	var res []types.RequestInfo
	for indexInBlock := range blockData {
		if err := indexToErr[indexInBlock]; err != nil {
			return nil, err
		}
		res = append(res, indexToRequestInfo[indexInBlock])
	}

	return res, nil
}

// verifySignatureIsBoundToProposal checks that the signature's embedded fields
// (block header, signature header, orderer metadata) all agree with the
// proposal and the claimed signer identity.
func (v *Verifier) verifySignatureIsBoundToProposal(sig *Signature, identity []byte, prop types.Proposal) error {
	// We verify the following fields:
	// ConsenterMetadata []byte
	// SignatureHeader []byte
	// BlockHeader []byte
	// OrdererBlockMetadata []byte

	// Ensure block header is equal
	if !bytes.Equal(prop.Header, sig.BlockHeader) {
		v.Logger.Errorf("Expected block header %s but got %s", base64.StdEncoding.EncodeToString(prop.Header),
			base64.StdEncoding.EncodeToString(sig.BlockHeader))
		return errors.Errorf("mismatched block header")
	}

	// Ensure signature header matches the identity
	sigHdr := &cb.SignatureHeader{}
	if err := proto.Unmarshal(sig.SignatureHeader, sigHdr); err != nil {
		return errors.Wrap(err, "malformed signature header")
	}
	if !bytes.Equal(sigHdr.Creator, identity) {
		v.Logger.Warnf("Expected identity %s but got %s", base64.StdEncoding.EncodeToString(sigHdr.Creator),
base64.StdEncoding.EncodeToString(identity))
		return errors.Errorf("identity in signature header does not match expected identity")
	}

	// Ensure orderer block metadata's consenter MD matches the proposal
	ordererMD := &cb.OrdererBlockMetadata{}
	if err := proto.Unmarshal(sig.OrdererBlockMetadata, ordererMD); err != nil {
		return errors.Wrap(err, "malformed orderer metadata in signature")
	}

	if !bytes.Equal(ordererMD.ConsenterMetadata, prop.Metadata) {
		v.Logger.Warnf("Expected consenter metadata %s but got %s in proposal",
			base64.StdEncoding.EncodeToString(ordererMD.ConsenterMetadata), base64.StdEncoding.EncodeToString(prop.Metadata))
		return errors.Errorf("consenter metadata in OrdererBlockMetadata doesn't match proposal")
	}

	block, err := ProposalToBlock(prop)
	if err != nil {
		v.Logger.Warnf("got malformed proposal: %v", err)
		return err
	}

	// Ensure Metadata slice is of the right size
	if len(block.Metadata.Metadata) != len(cb.BlockMetadataIndex_name) {
		return errors.Errorf("block metadata is of size %d but should be of size %d",
			len(block.Metadata.Metadata), len(cb.BlockMetadataIndex_name))
	}

	signatureMetadata := &cb.Metadata{}
	if err := proto.Unmarshal(block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES], signatureMetadata); err != nil {
		return errors.Wrap(err, "malformed signature metadata")
	}

	ordererMDFromBlock := &cb.OrdererBlockMetadata{}
	if err := proto.Unmarshal(signatureMetadata.Value, ordererMDFromBlock); err != nil {
		return errors.Wrap(err, "malformed orderer metadata in block")
	}

	// Ensure the block's OrdererBlockMetadata matches the signature.
	if !proto.Equal(ordererMDFromBlock, ordererMD) {
		return errors.Errorf("signature's OrdererBlockMetadata and OrdererBlockMetadata extracted from block do not match")
	}

	return nil
}

// consenterVerifier evaluates consenter signatures against the channel's
// orderer-writers policy.
type consenterVerifier struct {
	logger        *flogging.FabricLogger
	channel       string
	policyManager policies.Manager
}

// Evaluate evaluates signed data and returns no error if signature is valid and satisfies the policy
func (cv *consenterVerifier) Evaluate(signatureSet []*protoutil.SignedData) error {
	policy, ok := cv.policyManager.GetPolicy(policies.ChannelOrdererWriters)
	if !ok {
		cv.logger.Errorf("[%s] Error: could not find policy %s in policy manager %v", cv.channel, policies.ChannelOrdererWriters, cv.policyManager)
		return errors.Errorf("could not find policy %s", policies.ChannelOrdererWriters)
	}

	if cv.logger.IsEnabledFor(zapcore.DebugLevel) {
		cv.logger.Debugf("== Evaluating %T Policy %s ==", policy, policies.ChannelOrdererWriters)
		defer cv.logger.Debugf("== Done Evaluating %T Policy %s", policy, policies.ChannelOrdererWriters)
	}

	return policy.EvaluateSignedData(signatureSet)
}
diff --git a/protoutil/commonutils.go b/protoutil/commonutils.go
index ad5c333b1f0..2ccdb9ac6f3 100644
--- a/protoutil/commonutils.go
+++ b/protoutil/commonutils.go
@@ -267,3 +267,21 @@ func getRandomNonce() ([]byte, error) {
 	}
 	return key, nil
 }

// IsConfigTransaction reports whether the envelope carries a channel config
// or an orderer transaction. Malformed envelopes are reported as false.
func IsConfigTransaction(envelope *cb.Envelope) bool {
	payload, err := UnmarshalPayload(envelope.Payload)
	if err != nil {
		return false
	}

	if payload.Header == nil {
		return false
	}

	hdr, err := UnmarshalChannelHeader(payload.Header.ChannelHeader)
	if err != nil {
		return false
	}

	switch cb.HeaderType(hdr.Type) {
	case cb.HeaderType_CONFIG, cb.HeaderType_ORDERER_TRANSACTION:
		return true
	default:
		return false
	}
}
diff --git a/vendor/github.com/SmartBFT-Go/consensus/LICENSE b/vendor/github.com/SmartBFT-Go/consensus/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ 
b/vendor/github.com/SmartBFT-Go/consensus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/batcher.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/batcher.go new file mode 100644 index 00000000000..50c0011ced8 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/batcher.go @@ -0,0 +1,93 @@ +// Copyright IBM Corp. All Rights Reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "sync" + "time" +) + +// BatchBuilder implements Batcher +type BatchBuilder struct { + pool RequestPool + submittedChan chan struct{} + maxMsgCount int + maxSizeBytes uint64 + batchTimeout time.Duration + closeChan chan struct{} + closeLock sync.Mutex // Reset and Close may be called by different threads +} + +// NewBatchBuilder creates a new BatchBuilder +func NewBatchBuilder(pool RequestPool, submittedChan chan struct{}, maxMsgCount uint64, maxSizeBytes uint64, batchTimeout time.Duration) *BatchBuilder { + b := &BatchBuilder{ + pool: pool, + submittedChan: submittedChan, + maxMsgCount: int(maxMsgCount), + maxSizeBytes: maxSizeBytes, + batchTimeout: batchTimeout, + closeChan: make(chan struct{}), + } + return b +} + +// NextBatch returns the next batch of requests to be proposed. +// The method returns as soon as the batch is full, in terms of request count or total size, or after a timeout. +// The method may block. 
+func (b *BatchBuilder) NextBatch() [][]byte { + currBatch, full := b.pool.NextRequests(b.maxMsgCount, b.maxSizeBytes, true) + if full { + return currBatch + } + + timeout := time.After(b.batchTimeout) //TODO use task-scheduler based on logical time + + for { + select { + case <-b.closeChan: + return nil + case <-timeout: + currBatch, _ = b.pool.NextRequests(b.maxMsgCount, b.maxSizeBytes, false) + return currBatch + case <-b.submittedChan: + // there is a possibility to extend the current batch + currBatch, full = b.pool.NextRequests(b.maxMsgCount, b.maxSizeBytes, true) + if full { + return currBatch + } + } + } +} + +// Close closes the close channel to stop NextBatch +func (b *BatchBuilder) Close() { + b.closeLock.Lock() + defer b.closeLock.Unlock() + select { + case <-b.closeChan: + return + default: + + } + close(b.closeChan) +} + +// Closed returns true if the batcher is closed +func (b *BatchBuilder) Closed() bool { + select { + case <-b.closeChan: + return true + default: + return false + } +} + +// Reset reopens the close channel to allow calling NextBatch +func (b *BatchBuilder) Reset() { + b.closeLock.Lock() + defer b.closeLock.Unlock() + b.closeChan = make(chan struct{}) +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/controller.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/controller.go new file mode 100644 index 00000000000..9e6e80c4e0f --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/controller.go @@ -0,0 +1,842 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "sync" + "sync/atomic" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" +) + +// Decider delivers the proposal with signatures to the application +//go:generate mockery -dir . 
-name Decider -case underscore -output ./mocks/ +type Decider interface { + Decide(proposal types.Proposal, signatures []types.Signature, requests []types.RequestInfo) +} + +// FailureDetector initiates a view change when there is a complaint +//go:generate mockery -dir . -name FailureDetector -case underscore -output ./mocks/ +type FailureDetector interface { + Complain(viewNum uint64, stopView bool) +} + +// Batcher batches requests to eventually become a new proposal +//go:generate mockery -dir . -name Batcher -case underscore -output ./mocks/ +type Batcher interface { + NextBatch() [][]byte + Close() + Closed() bool + Reset() +} + +// RequestPool is a pool of client's requests +//go:generate mockery -dir . -name RequestPool -case underscore -output ./mocks/ +type RequestPool interface { + Prune(predicate func([]byte) error) + Submit(request []byte) error + Size() int + NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) + RemoveRequest(request types.RequestInfo) error + StopTimers() + RestartTimers() + Close() +} + +// LeaderMonitor monitors the heartbeat from the current leader +//go:generate mockery -dir . -name LeaderMonitor -case underscore -output ./mocks/ +type LeaderMonitor interface { + ChangeRole(role Role, view uint64, leaderID uint64) + ProcessMsg(sender uint64, msg *protos.Message) + InjectArtificialHeartbeat(sender uint64, msg *protos.Message) + HeartbeatWasSent() + Close() +} + +// Proposer proposes a new proposal to be agreed on +type Proposer interface { + Propose(proposal types.Proposal) + Start() + Abort() + GetMetadata() []byte + HandleMessage(sender uint64, m *protos.Message) +} + +// ProposerBuilder builds a new Proposer +//go:generate mockery -dir . 
-name ProposerBuilder -case underscore -output ./mocks/ +type ProposerBuilder interface { + NewProposer(leader, proposalSequence, viewNum, decisionsInView uint64, quorumSize int) Proposer +} + +// Controller controls the entire flow of the consensus +type Controller struct { + api.Comm + // configuration + ID uint64 + N uint64 + NodesList []uint64 + LeaderRotation bool + DecisionsPerLeader uint64 + RequestPool RequestPool + Batcher Batcher + LeaderMonitor LeaderMonitor + Verifier api.Verifier + Logger api.Logger + Assembler api.Assembler + Application api.Application + FailureDetector FailureDetector + Synchronizer api.Synchronizer + Signer api.Signer + RequestInspector api.RequestInspector + WAL api.WriteAheadLog + ProposerBuilder ProposerBuilder + Checkpoint *types.Checkpoint + ViewChanger *ViewChanger + Collector *StateCollector + State State + + quorum int + + currView Proposer + + currViewLock sync.RWMutex + currViewNumber uint64 + + currDecisionsInViewLock sync.RWMutex + currDecisionsInView uint64 + + viewChange chan viewInfo + abortViewChan chan uint64 + + stopOnce sync.Once + stopChan chan struct{} + + syncChan chan struct{} + decisionChan chan decision + deliverChan chan struct{} + leaderToken chan struct{} + verificationSequence uint64 + + controllerDone sync.WaitGroup + + ViewSequences *atomic.Value + + StartedWG *sync.WaitGroup +} + +func (c *Controller) blacklist() []uint64 { + prop, _ := c.Checkpoint.Get() + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(prop.Metadata, md); err != nil { + c.Logger.Panicf("Failed unmarshalling metadata: %v", err) + } + + return md.BlackList +} + +func (c *Controller) getCurrentViewNumber() uint64 { + c.currViewLock.RLock() + defer c.currViewLock.RUnlock() + + return c.currViewNumber +} + +func (c *Controller) setCurrentViewNumber(viewNumber uint64) { + c.currViewLock.Lock() + defer c.currViewLock.Unlock() + + c.currViewNumber = viewNumber +} + +func (c *Controller) getCurrentDecisionsInView() uint64 { + 
c.currDecisionsInViewLock.RLock() + defer c.currDecisionsInViewLock.RUnlock() + + return c.currDecisionsInView +} + +func (c *Controller) incrementCurrentDecisionsInView() { + c.currDecisionsInViewLock.Lock() + defer c.currDecisionsInViewLock.Unlock() + + c.currDecisionsInView++ +} + +func (c *Controller) setCurrentDecisionsInView(decisions uint64) { + c.currDecisionsInViewLock.Lock() + defer c.currDecisionsInViewLock.Unlock() + + c.currDecisionsInView = decisions +} + +// thread safe +func (c *Controller) iAmTheLeader() (bool, uint64) { + leader := c.leaderID() + return leader == c.ID, leader +} + +// thread safe +func (c *Controller) leaderID() uint64 { + return getLeaderID(c.getCurrentViewNumber(), c.N, c.NodesList, c.LeaderRotation, c.getCurrentDecisionsInView(), c.DecisionsPerLeader, c.blacklist()) +} + +func (c *Controller) GetLeaderID() uint64 { + return c.leaderID() +} + +// HandleRequest handles a request from the client +func (c *Controller) HandleRequest(sender uint64, req []byte) { + iAm, leaderID := c.iAmTheLeader() + if !iAm { + c.Logger.Warnf("Got request from %d but the leader is %d, dropping request", sender, leaderID) + return + } + reqInfo, err := c.Verifier.VerifyRequest(req) + if err != nil { + c.Logger.Warnf("Got bad request from %d: %v", sender, err) + return + } + c.Logger.Debugf("Got request from %d", sender) + c.addRequest(reqInfo, req) +} + +// SubmitRequest Submits a request to go through consensus. +func (c *Controller) SubmitRequest(request []byte) error { + info := c.RequestInspector.RequestID(request) + return c.addRequest(info, request) +} + +func (c *Controller) addRequest(info types.RequestInfo, request []byte) error { + err := c.RequestPool.Submit(request) + if err != nil { + c.Logger.Infof("Request %s was not submitted, error: %s", info, err) + return err + } + + c.Logger.Debugf("Request %s was submitted", info) + + return nil +} + +// OnRequestTimeout is called when request-timeout expires and forwards the request to leader. 
+// Called by the request-pool timeout goroutine. Upon return, the leader-forward timeout is started. +func (c *Controller) OnRequestTimeout(request []byte, info types.RequestInfo) { + iAm, leaderID := c.iAmTheLeader() + if iAm { + c.Logger.Infof("Request %s timeout expired, this node is the leader, nothing to do", info) + return + } + + c.Logger.Infof("Request %s timeout expired, forwarding request to leader: %d", info, leaderID) + c.Comm.SendTransaction(leaderID, request) + + return +} + +// OnLeaderFwdRequestTimeout is called when the leader-forward timeout expires, and complains about the leader. +// Called by the request-pool timeout goroutine. Upon return, the auto-remove timeout is started. +func (c *Controller) OnLeaderFwdRequestTimeout(request []byte, info types.RequestInfo) { + iAm, leaderID := c.iAmTheLeader() + if iAm { + c.Logger.Infof("Request %s leader-forwarding timeout expired, this node is the leader, nothing to do", info) + return + } + + c.Logger.Warnf("Request %s leader-forwarding timeout expired, complaining about leader: %d", info, leaderID) + c.FailureDetector.Complain(c.getCurrentViewNumber(), true) + + return +} + +// OnAutoRemoveTimeout is called when the auto-remove timeout expires. +// Called by the request-pool timeout goroutine. +func (c *Controller) OnAutoRemoveTimeout(requestInfo types.RequestInfo) { + c.Logger.Debugf("Request %s auto-remove timeout expired, removed from the request pool", requestInfo) +} + +// OnHeartbeatTimeout is called when the heartbeat timeout expires. +// Called by the HeartbeatMonitor goroutine. 
+func (c *Controller) OnHeartbeatTimeout(view uint64, leaderID uint64) { + c.Logger.Debugf("Heartbeat timeout expired, reported-view: %d, reported-leader: %d", view, leaderID) + + iAm, currentLeaderID := c.iAmTheLeader() + if iAm { + c.Logger.Debugf("Heartbeat timeout expired, this node is the leader, nothing to do; current-view: %d, current-leader: %d", + c.getCurrentViewNumber(), currentLeaderID) + return + } + + if leaderID != currentLeaderID { + c.Logger.Warnf("Heartbeat timeout expired, but current leader: %d, differs from reported leader: %d; ignoring", currentLeaderID, leaderID) + return + } + + c.Logger.Warnf("Heartbeat timeout expired, complaining about leader: %d", leaderID) + c.FailureDetector.Complain(c.getCurrentViewNumber(), true) +} + +// ProcessMessages dispatches the incoming message to the required component +func (c *Controller) ProcessMessages(sender uint64, m *protos.Message) { + c.Logger.Debugf("%d got message from %d: %s", c.ID, sender, MsgToString(m)) + switch m.GetContent().(type) { + case *protos.Message_PrePrepare, *protos.Message_Prepare, *protos.Message_Commit: + c.currViewLock.RLock() + view := c.currView + c.currViewLock.RUnlock() + view.HandleMessage(sender, m) + c.ViewChanger.HandleViewMessage(sender, m) + if sender == c.leaderID() { + c.LeaderMonitor.InjectArtificialHeartbeat(sender, c.convertViewMessageToHeartbeat(m)) + } + case *protos.Message_ViewChange, *protos.Message_ViewData, *protos.Message_NewView: + c.ViewChanger.HandleMessage(sender, m) + case *protos.Message_HeartBeat, *protos.Message_HeartBeatResponse: + c.LeaderMonitor.ProcessMsg(sender, m) + case *protos.Message_StateTransferRequest: + c.respondToStateTransferRequest(sender) + case *protos.Message_StateTransferResponse: + c.Collector.HandleMessage(sender, m) + default: + c.Logger.Warnf("Unexpected message type, ignoring") + } +} + +func (c *Controller) respondToStateTransferRequest(sender uint64) { + vs := c.ViewSequences.Load() + if vs == nil { + 
c.Logger.Panicf("ViewSequences is nil") + } + msg := &protos.Message{ + Content: &protos.Message_StateTransferResponse{ + StateTransferResponse: &protos.StateTransferResponse{ + ViewNum: c.getCurrentViewNumber(), + Sequence: vs.(ViewSequence).ProposalSeq, + }, + }, + } + c.Comm.SendConsensus(sender, msg) +} + +func (c *Controller) convertViewMessageToHeartbeat(m *protos.Message) *protos.Message { + view := viewNumber(m) + seq := proposalSequence(m) + return &protos.Message{ + Content: &protos.Message_HeartBeat{ + HeartBeat: &protos.HeartBeat{ + View: view, + Seq: seq, + }, + }, + } +} + +func (c *Controller) startView(proposalSequence uint64) { + view := c.ProposerBuilder.NewProposer(c.leaderID(), proposalSequence, c.currViewNumber, c.currDecisionsInView, c.quorum) + + c.currViewLock.Lock() + c.currView = view + c.currView.Start() + c.currViewLock.Unlock() + + role := Follower + leader, _ := c.iAmTheLeader() + if leader { + role = Leader + } + c.LeaderMonitor.ChangeRole(role, c.currViewNumber, c.leaderID()) + c.Logger.Infof("Starting view with number %d, sequence %d, and decisions %d", c.currViewNumber, proposalSequence, c.currDecisionsInView) +} + +func (c *Controller) changeView(newViewNumber uint64, newProposalSequence uint64, newDecisionsInView uint64) { + + latestView := c.getCurrentViewNumber() + if latestView > newViewNumber { + c.Logger.Debugf("Got view change to %d but already at %d", newViewNumber, latestView) + return + } + + if !c.abortView(latestView) { + return + } + + c.setCurrentViewNumber(newViewNumber) + c.setCurrentDecisionsInView(newDecisionsInView) + c.Logger.Debugf("Starting view after setting decisions in view to %d", newDecisionsInView) + c.startView(newProposalSequence) + + // If I'm the leader, I can claim the leader token. 
+ if iAm, _ := c.iAmTheLeader(); iAm { + c.Batcher.Reset() + c.acquireLeaderToken() + } +} + +func (c *Controller) abortView(view uint64) bool { + currView := c.getCurrentViewNumber() + if view < currView { + c.Logger.Debugf("Was asked to abort view %d but the current view with number %d", view, currView) + return false + } + + // Drain the leader token in case we held it, + // so we won't start proposing after view change. + c.relinquishLeaderToken() + + // Kill current view + c.Logger.Debugf("Aborting current view with number %d", c.currViewNumber) + c.currView.Abort() + + return true +} + +// Sync initiates a synchronization +func (c *Controller) Sync() { + if iAmLeader, _ := c.iAmTheLeader(); iAmLeader { + c.Batcher.Close() + } + c.grabSyncToken() +} + +// AbortView makes the controller abort the current view +func (c *Controller) AbortView(view uint64) { + c.Logger.Debugf("AbortView, the current view num is %d", c.getCurrentViewNumber()) + + c.Batcher.Close() + + c.abortViewChan <- view +} + +// ViewChanged makes the controller abort the current view and start a new one with the given numbers +func (c *Controller) ViewChanged(newViewNumber uint64, newProposalSequence uint64) { + c.Logger.Debugf("ViewChanged, the new view is %d", newViewNumber) + amILeader, _ := c.iAmTheLeader() + if amILeader { + c.Batcher.Close() + } + c.viewChange <- viewInfo{proposalSeq: newProposalSequence, viewNumber: newViewNumber} +} + +func (c *Controller) getNextBatch() [][]byte { + var validRequests [][]byte + for len(validRequests) == 0 { // no valid requests in this batch + requests := c.Batcher.NextBatch() + if c.stopped() || c.Batcher.Closed() { + return nil + } + for _, req := range requests { + validRequests = append(validRequests, req) + } + } + return validRequests +} + +func (c *Controller) propose() { + nextBatch := c.getNextBatch() + if len(nextBatch) == 0 { + // If our next batch is empty, + // it can only be because + // the batcher is stopped and so are we. 
+ return + } + metadata := c.currView.GetMetadata() + proposal := c.Assembler.AssembleProposal(metadata, nextBatch) + c.currView.Propose(proposal) +} + +func (c *Controller) run() { + // At exit, always make sure to kill current view + // and wait for it to finish. + defer func() { + c.Logger.Infof("Exiting") + c.currView.Abort() + }() + + for { + select { + case d := <-c.decisionChan: + c.decide(d) + case newView := <-c.viewChange: + c.changeView(newView.viewNumber, newView.proposalSeq, 0) + case view := <-c.abortViewChan: + c.abortView(view) + case <-c.stopChan: + return + case <-c.leaderToken: + c.propose() + case <-c.syncChan: + view, seq, dec := c.sync() + c.MaybePruneRevokedRequests() + if view > 0 || seq > 0 { + c.changeView(view, seq, dec) + } else { + vs := c.ViewSequences.Load() + if vs == nil { + c.Logger.Panicf("ViewSequences is nil") + } + c.changeView(c.getCurrentViewNumber(), vs.(ViewSequence).ProposalSeq, c.getCurrentDecisionsInView()) + } + } + } +} + +func (c *Controller) decide(d decision) { + reconfig := c.Application.Deliver(d.proposal, d.signatures) + if reconfig.InLatestDecision { + c.close() + } + c.Checkpoint.Set(d.proposal, d.signatures) + c.Logger.Debugf("Node %d delivered proposal", c.ID) + c.removeDeliveredFromPool(d) + select { + case c.deliverChan <- struct{}{}: + case <-c.stopChan: + return + } + c.incrementCurrentDecisionsInView() + + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(d.proposal.Metadata, md); err != nil { + c.Logger.Panicf("Failed to unmarshal proposal metadata, error: %v", err) + } + + if c.checkIfRotate(md.BlackList) { + c.Logger.Debugf("Restarting view to rotate the leader") + c.changeView(c.getCurrentViewNumber(), md.LatestSequence+1, c.getCurrentDecisionsInView()) + } + c.MaybePruneRevokedRequests() + if iAm, _ := c.iAmTheLeader(); iAm { + c.acquireLeaderToken() + } +} + +func (c *Controller) checkIfRotate(blacklist []uint64) bool { + view := c.getCurrentViewNumber() + decisionsInView := 
c.getCurrentDecisionsInView() + c.Logger.Debugf("view(%d) + (decisionsInView(%d) / decisionsPerLeader(%d)), N(%d), blacklist(%v)", + view, decisionsInView, c.DecisionsPerLeader, c.N, blacklist) + // called after increment + currLeader := getLeaderID(view, c.N, c.NodesList, c.LeaderRotation, decisionsInView-1, c.DecisionsPerLeader, blacklist) + nextLeader := getLeaderID(view, c.N, c.NodesList, c.LeaderRotation, decisionsInView, c.DecisionsPerLeader, blacklist) + shouldWeRotate := currLeader != nextLeader + if shouldWeRotate { + c.Logger.Infof("Rotating leader from %d to %d", currLeader, nextLeader) + } + + return shouldWeRotate +} + +func (c *Controller) sync() (viewNum uint64, seq uint64, decisions uint64) { + // Block any concurrent sync attempt. + c.grabSyncToken() + // At exit, enable sync once more, but ignore + // all synchronization attempts done while + // we were syncing. + defer c.relinquishSyncToken() + + syncResponse := c.Synchronizer.Sync() + if syncResponse.Reconfig.InReplicatedDecisions { + c.close() + c.ViewChanger.close() + } + decision := syncResponse.Latest + if decision.Proposal.Metadata == nil { + c.Logger.Infof("Synchronizer returned with proposal metadata nil") + response := c.fetchState() + if response == nil { + return 0, 0, 0 + } + if response.View > 0 && response.Seq == 1 { + c.Logger.Infof("The collected state is with view %d and sequence %d", response.View, response.Seq) + newViewToSave := &protos.SavedMessage{ + Content: &protos.SavedMessage_NewView{ + NewView: &protos.ViewMetadata{ + ViewId: response.View, + LatestSequence: 0, + DecisionsInView: 0, + }, + }, + } + if err := c.State.Save(newViewToSave); err != nil { + c.Logger.Panicf("Failed to save message to state, error: %v", err) + } + c.ViewChanger.InformNewView(response.View) + return response.View, 1, 0 + } + return 0, 0, 0 + } + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(decision.Proposal.Metadata, md); err != nil { + c.Logger.Panicf("Controller was unable to 
unmarshal the proposal metadata returned by the Synchronizer") + } + if md.ViewId < c.currViewNumber { + c.Logger.Infof("Synchronizer returned with view number %d but the controller is at view number %d", md.ViewId, c.currViewNumber) + return 0, 0, 0 + } + c.Logger.Infof("Synchronized to view %d and sequence %d with verification sequence %d", md.ViewId, md.LatestSequence, decision.Proposal.VerificationSequence) + + view := md.ViewId + newView := false + + response := c.fetchState() + if response != nil { + if response.View > md.ViewId && response.Seq == md.LatestSequence+1 { + c.Logger.Infof("The collected state is with view %d and sequence %d", response.View, response.Seq) + view = response.View + newViewToSave := &protos.SavedMessage{ + Content: &protos.SavedMessage_NewView{ + NewView: &protos.ViewMetadata{ + ViewId: view, + LatestSequence: md.LatestSequence, + DecisionsInView: 0, + }, + }, + } + if err := c.State.Save(newViewToSave); err != nil { + c.Logger.Panicf("Failed to save message to state, error: %v", err) + } + newView = true + } + } + + c.Logger.Debugf("Node %d is setting the checkpoint after sync to view %d and seq %d", c.ID, md.ViewId, md.LatestSequence) + c.Checkpoint.Set(decision.Proposal, decision.Signatures) + c.verificationSequence = uint64(decision.Proposal.VerificationSequence) + c.Logger.Debugf("Node %d is informing the view changer after sync of view %d and seq %d", c.ID, md.ViewId, md.LatestSequence) + c.ViewChanger.InformNewView(view) + if md.LatestSequence == 0 || newView { + return view, md.LatestSequence + 1, 0 + } + return view, md.LatestSequence + 1, md.DecisionsInView + 1 +} + +func (c *Controller) fetchState() *types.ViewAndSeq { + msg := &protos.Message{ + Content: &protos.Message_StateTransferRequest{ + StateTransferRequest: &protos.StateTransferRequest{}, + }, + } + c.Collector.ClearCollected() + c.BroadcastConsensus(msg) + return c.Collector.CollectStateResponses() +} + +func (c *Controller) grabSyncToken() { + select { + case 
c.syncChan <- struct{}{}:
+	default:
+	}
+}
+
+func (c *Controller) relinquishSyncToken() {
+	select {
+	case <-c.syncChan:
+	default:
+	}
+}
+
+// MaybePruneRevokedRequests prunes requests with different verification sequence
+func (c *Controller) MaybePruneRevokedRequests() {
+	oldVerSqn := c.verificationSequence
+	newVerSqn := c.Verifier.VerificationSequence()
+	if newVerSqn == oldVerSqn {
+		return
+	}
+	c.verificationSequence = newVerSqn
+
+	c.Logger.Infof("Verification sequence changed: %d --> %d", oldVerSqn, newVerSqn)
+	c.RequestPool.Prune(func(req []byte) error {
+		_, err := c.Verifier.VerifyRequest(req)
+		return err
+	})
+}
+
+func (c *Controller) acquireLeaderToken() {
+	select {
+	case c.leaderToken <- struct{}{}:
+	default:
+		// No room, seems we're already a leader.
+	}
+}
+
+func (c *Controller) relinquishLeaderToken() {
+	select {
+	case <-c.leaderToken:
+	default:
+
+	}
+}
+
+func (c *Controller) syncOnStart(startViewNumber uint64, startProposalSequence uint64, startDecisionsInView uint64) (viewNum uint64, seq uint64, decisions uint64) {
+	syncView, syncSeq, syncDecisions := c.sync()
+	c.MaybePruneRevokedRequests()
+	viewNum = startViewNumber
+	seq = startProposalSequence
+	decisions = startDecisionsInView
+	if syncView > startViewNumber {
+		viewNum = syncView
+		decisions = syncDecisions
+	}
+	if syncSeq > startProposalSequence {
+		seq = syncSeq
+		decisions = syncDecisions
+	}
+	return viewNum, seq, decisions
+}
+
+// Start the controller
+func (c *Controller) Start(startViewNumber uint64, startProposalSequence uint64, startDecisionsInView uint64, syncOnStart bool) {
+	c.Logger.Debugf("Starting controller with view %d, sequence %d, and decisions %d", startViewNumber, startProposalSequence, startDecisionsInView)
+	c.controllerDone.Add(1)
+	c.stopOnce = sync.Once{}
+	c.syncChan = make(chan struct{}, 1)
+	c.stopChan = make(chan struct{})
+	c.leaderToken = make(chan struct{}, 1)
+	c.decisionChan = make(chan decision)
+	c.deliverChan = make(chan struct{})
+	
c.viewChange = make(chan viewInfo, 1) + c.abortViewChan = make(chan uint64, 1) + + Q, F := computeQuorum(c.N) + c.Logger.Debugf("The number of nodes (N) is %d, F is %d, and the quorum size is %d", c.N, F, Q) + c.quorum = Q + + c.verificationSequence = c.Verifier.VerificationSequence() + + if syncOnStart { + startViewNumber, startProposalSequence, startDecisionsInView = c.syncOnStart(startViewNumber, startProposalSequence, startDecisionsInView) + c.Logger.Debugf("After sync starting controller with view %d, sequence %d, and decisions %d", startViewNumber, startProposalSequence, startDecisionsInView) + } + + c.currViewNumber = startViewNumber + c.currDecisionsInView = startDecisionsInView + c.startView(startProposalSequence) + if iAm, _ := c.iAmTheLeader(); iAm { + c.acquireLeaderToken() + } + + go func() { + defer c.controllerDone.Done() + c.run() + }() + + c.StartedWG.Done() +} + +func (c *Controller) close() { + c.stopOnce.Do( + func() { + select { + case <-c.stopChan: + return + default: + close(c.stopChan) + } + }, + ) +} + +// Stop the controller +func (c *Controller) Stop() { + c.close() + c.Batcher.Close() + c.RequestPool.Close() + c.LeaderMonitor.Close() + + // Drain the leader token if we hold it. + select { + case <-c.leaderToken: + default: + // Do nothing + } + + c.controllerDone.Wait() +} + +// Stop the controller but only stop the requests pool timers +func (c *Controller) StopWithPoolPause() { + c.close() + c.Batcher.Close() + c.RequestPool.StopTimers() + c.LeaderMonitor.Close() + + // Drain the leader token if we hold it. 
+ select { + case <-c.leaderToken: + default: + // Do nothing + } + + c.controllerDone.Wait() +} + +func (c *Controller) stopped() bool { + select { + case <-c.stopChan: + return true + default: + return false + } +} + +// Decide delivers the decision to the application +func (c *Controller) Decide(proposal types.Proposal, signatures []types.Signature, requests []types.RequestInfo) { + select { + case c.decisionChan <- decision{ + proposal: proposal, + requests: requests, + signatures: signatures, + }: + case <-c.stopChan: + // In case we are in the middle of shutting down, + // abort deciding. + return + } + + select { + case <-c.deliverChan: // wait for the delivery of the decision to the application + case <-c.stopChan: // If we stopped the controller, abort delivery + } + +} + +func (c *Controller) removeDeliveredFromPool(d decision) { + for _, reqInfo := range d.requests { + if err := c.RequestPool.RemoveRequest(reqInfo); err != nil { + c.Logger.Debugf("Request %s wasn't found in the pool : %s", reqInfo, err) + } + } +} + +type viewInfo struct { + viewNumber uint64 + proposalSeq uint64 +} + +type decision struct { + proposal types.Proposal + signatures []types.Signature + requests []types.RequestInfo +} + +//BroadcastConsensus broadcasts the message and informs the heartbeat monitor if necessary +func (c *Controller) BroadcastConsensus(m *protos.Message) { + for _, node := range c.NodesList { + // Do not send to yourself + if c.ID == node { + continue + } + c.Comm.SendConsensus(node, m) + } + + if m.GetPrePrepare() != nil || m.GetPrepare() != nil || m.GetCommit() != nil { + if leader, _ := c.iAmTheLeader(); leader { + c.LeaderMonitor.HeartbeatWasSent() + } + } +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/heartbeatmonitor.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/heartbeatmonitor.go new file mode 100644 index 00000000000..563cd969c17 --- /dev/null +++ 
b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/heartbeatmonitor.go @@ -0,0 +1,396 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/smartbftprotos" +) + +// A node could either be a leader or a follower +const ( + Leader Role = false + Follower Role = true +) + +//go:generate mockery -dir . -name HeartbeatEventHandler -case underscore -output ./mocks/ + +// HeartbeatEventHandler defines who to call when a heartbeat timeout expires or a Sync needs to be triggered. +// This is implemented by the Controller. +type HeartbeatEventHandler interface { + // OnHeartbeatTimeout is called when a heartbeat timeout expires. + OnHeartbeatTimeout(view uint64, leaderID uint64) + // Sync is called when enough heartbeat responses report that the current leader's view is outdated. + Sync() +} + +// Role indicates if this node is a follower or a leader +type Role bool + +type roleChange struct { + view uint64 + leaderID uint64 + follower Role +} + +// heartbeatResponseCollector is a map from node ID to view number, and hold the last response from each node. 
+type heartbeatResponseCollector map[uint64]uint64 + +// HeartbeatMonitor implements LeaderMonitor +type HeartbeatMonitor struct { + scheduler <-chan time.Time + inc chan incMsg + stopChan chan struct{} + commandChan chan roleChange + logger api.Logger + hbTimeout time.Duration + hbCount uint64 + comm Comm + numberOfNodes uint64 + handler HeartbeatEventHandler + view uint64 + leaderID uint64 + follower Role + lastHeartbeat time.Time + lastTick time.Time + hbRespCollector heartbeatResponseCollector + running sync.WaitGroup + runOnce sync.Once + timedOut bool + syncReq bool + viewSequences *atomic.Value + sentHeartbeat chan struct{} + artificialHeartbeat chan incMsg + behindSeq uint64 + behindCounter uint64 + numOfTicksBehindBeforeSyncing uint64 + followerBehind bool +} + +// NewHeartbeatMonitor creates a new HeartbeatMonitor +func NewHeartbeatMonitor(scheduler <-chan time.Time, logger api.Logger, heartbeatTimeout time.Duration, heartbeatCount uint64, comm Comm, numberOfNodes uint64, handler HeartbeatEventHandler, viewSequences *atomic.Value, numOfTicksBehindBeforeSyncing uint64) *HeartbeatMonitor { + hm := &HeartbeatMonitor{ + stopChan: make(chan struct{}), + inc: make(chan incMsg), + commandChan: make(chan roleChange), + scheduler: scheduler, + logger: logger, + hbTimeout: heartbeatTimeout, + hbCount: heartbeatCount, + comm: comm, + numberOfNodes: numberOfNodes, + handler: handler, + hbRespCollector: make(heartbeatResponseCollector), + viewSequences: viewSequences, + sentHeartbeat: make(chan struct{}, 1), + artificialHeartbeat: make(chan incMsg, 1), + numOfTicksBehindBeforeSyncing: numOfTicksBehindBeforeSyncing, + } + return hm +} + +func (hm *HeartbeatMonitor) start() { + hm.running.Add(1) + go hm.run() +} + +// Close stops following or sending heartbeats. 
+func (hm *HeartbeatMonitor) Close() { + if hm.closed() { + return + } + defer func() { + hm.lastHeartbeat = time.Time{} + hm.lastTick = time.Time{} + }() + defer hm.running.Wait() + close(hm.stopChan) +} + +func (hm *HeartbeatMonitor) run() { + defer hm.running.Done() + for { + select { + case <-hm.stopChan: + return + case now := <-hm.scheduler: + hm.tick(now) + case msg := <-hm.inc: + hm.handleMsg(msg.sender, msg.Message) + case cmd := <-hm.commandChan: + hm.handleCommand(cmd) + case <-hm.sentHeartbeat: + hm.lastHeartbeat = hm.lastTick + case msg := <-hm.artificialHeartbeat: + hm.handleArtificialHeartBeat(msg.sender, msg.GetHeartBeat()) + } + } +} + +// ProcessMsg handles an incoming heartbeat or heartbeat-response. +// If the sender and msg.View equal what we expect, and the timeout had not expired yet, the timeout is extended. +func (hm *HeartbeatMonitor) ProcessMsg(sender uint64, msg *smartbftprotos.Message) { + select { + case hm.inc <- incMsg{ + sender: sender, + Message: msg, + }: + case <-hm.stopChan: + } +} + +// InjectArtificialHeartbeat injects an artificial heartbeat to the monitor +func (hm *HeartbeatMonitor) InjectArtificialHeartbeat(sender uint64, msg *smartbftprotos.Message) { + select { + case hm.artificialHeartbeat <- incMsg{ + sender: sender, + Message: msg, + }: + default: + } +} + +// ChangeRole will change the role of this HeartbeatMonitor +func (hm *HeartbeatMonitor) ChangeRole(follower Role, view uint64, leaderID uint64) { + hm.runOnce.Do(func() { + hm.follower = follower + hm.start() + }) + + role := "leader" + if follower { + role = "follower" + } + + hm.logger.Infof("Changing to %s role, current view: %d, current leader: %d", role, view, leaderID) + select { + case hm.commandChan <- roleChange{ + leaderID: leaderID, + view: view, + follower: follower, + }: + case <-hm.stopChan: + return + } + +} + +func (hm *HeartbeatMonitor) handleMsg(sender uint64, msg *smartbftprotos.Message) { + switch msg.GetContent().(type) { + case 
*smartbftprotos.Message_HeartBeat: + hm.handleRealHeartBeat(sender, msg.GetHeartBeat()) + case *smartbftprotos.Message_HeartBeatResponse: + hm.handleHeartBeatResponse(sender, msg.GetHeartBeatResponse()) + default: + hm.logger.Warnf("Unexpected message type, ignoring") + } +} + +func (hm *HeartbeatMonitor) handleRealHeartBeat(sender uint64, hb *smartbftprotos.HeartBeat) { + hm.handleHeartBeat(sender, hb, false) +} + +func (hm *HeartbeatMonitor) handleArtificialHeartBeat(sender uint64, hb *smartbftprotos.HeartBeat) { + hm.handleHeartBeat(sender, hb, true) +} + +func (hm *HeartbeatMonitor) handleHeartBeat(sender uint64, hb *smartbftprotos.HeartBeat, artificial bool) { + if hb.View < hm.view { + hm.logger.Debugf("Heartbeat view is lower than expected, sending response; expected-view=%d, received-view: %d", hm.view, hb.View) + hm.sendHeartBeatResponse(sender) + return + } + + if sender != hm.leaderID { + hm.logger.Debugf("Heartbeat sender is not leader, ignoring; leader: %d, sender: %d", hm.leaderID, sender) + return + } + + if hb.View > hm.view { + hm.logger.Debugf("Heartbeat view is bigger than expected, syncing and ignoring; expected-view=%d, received-view: %d", hm.view, hb.View) + hm.handler.Sync() + return + } + + active, ourSeq := hm.viewActive(hb) + if active && !artificial { + if ourSeq+1 < hb.Seq { + hm.logger.Debugf("Heartbeat sequence is bigger than expected, leader's sequence is %d and ours is %d, syncing and ignoring", hb.Seq, ourSeq) + hm.handler.Sync() + return + } + if ourSeq+1 == hb.Seq { + hm.followerBehind = true + hm.logger.Debugf("Our sequence is behind the heartbeat sequence, leader's sequence is %d and ours is %d", hb.Seq, ourSeq) + if ourSeq > hm.behindSeq { + hm.behindSeq = ourSeq + hm.behindCounter = 0 + } + } else { + hm.followerBehind = false + } + } else { + hm.followerBehind = false + } + + hm.logger.Debugf("Received heartbeat from %d, last heartbeat was %v ago", sender, hm.lastTick.Sub(hm.lastHeartbeat)) + hm.lastHeartbeat = hm.lastTick +} 
+ +// handleHeartBeatResponse keeps track of responses, and if we get f+1 identical, force a sync +func (hm *HeartbeatMonitor) handleHeartBeatResponse(sender uint64, hbr *smartbftprotos.HeartBeatResponse) { + if hm.follower { + hm.logger.Debugf("Monitor is not a leader, ignoring HeartBeatResponse; sender: %d, msg: %v", sender, hbr) + return + } + + if hm.syncReq { + hm.logger.Debugf("Monitor already called Sync, ignoring HeartBeatResponse; sender: %d, msg: %v", sender, hbr) + return + } + + if hm.view >= hbr.View { + hm.logger.Debugf("Monitor view: %d >= HeartBeatResponse, ignoring; sender: %d, msg: %v", hm.view, sender, hbr) + return + } + + hm.logger.Debugf("Received HeartBeatResponse, msg: %v; from %d", hbr, sender) + hm.hbRespCollector[sender] = hbr.View + + // check if we have f+1 votes + _, f := computeQuorum(hm.numberOfNodes) + if len(hm.hbRespCollector) >= f+1 { + hm.logger.Infof("Received HeartBeatResponse triggered a call to HeartBeatEventHandler Sync, view: %d", hbr.View) + hm.handler.Sync() + hm.syncReq = true + } +} + +func (hm *HeartbeatMonitor) sendHeartBeatResponse(target uint64) { + heartbeatResponse := &smartbftprotos.Message{ + Content: &smartbftprotos.Message_HeartBeatResponse{ + HeartBeatResponse: &smartbftprotos.HeartBeatResponse{ + View: hm.view, + }, + }, + } + hm.comm.SendConsensus(target, heartbeatResponse) + hm.logger.Debugf("Sent HeartBeatResponse view: %d; to %d", hm.view, target) +} + +func (hm *HeartbeatMonitor) viewActive(hbMsg *smartbftprotos.HeartBeat) (bool, uint64) { + vs := hm.viewSequences.Load() + // View isn't initialized + if vs == nil { + return false, 0 + } + + viewSeq := vs.(ViewSequence) + if !viewSeq.ViewActive { + return false, 0 + } + + return true, viewSeq.ProposalSeq +} + +func (hm *HeartbeatMonitor) tick(now time.Time) { + hm.lastTick = now + if hm.lastHeartbeat.IsZero() { + hm.lastHeartbeat = now + } + if hm.follower { + hm.followerTick(now) + } else { + hm.leaderTick(now) + } +} + +func (hm *HeartbeatMonitor) 
closed() bool { + select { + case <-hm.stopChan: + return true + default: + return false + } +} + +func (hm *HeartbeatMonitor) handleCommand(cmd roleChange) { + hm.timedOut = false + hm.view = cmd.view + hm.leaderID = cmd.leaderID + hm.follower = cmd.follower + hm.lastHeartbeat = hm.lastTick + hm.hbRespCollector = make(heartbeatResponseCollector) + hm.syncReq = false +} + +func (hm *HeartbeatMonitor) leaderTick(now time.Time) { + if now.Sub(hm.lastHeartbeat)*time.Duration(hm.hbCount) < hm.hbTimeout { + return + } + + var sequence uint64 + vs := hm.viewSequences.Load() + if vs != nil && vs.(ViewSequence).ViewActive { + sequence = vs.(ViewSequence).ProposalSeq + } else { + hm.logger.Infof("ViewSequence uninitialized or view inactive") + return + } + hm.logger.Debugf("Sending heartbeat with view %d, sequence %d", hm.view, sequence) + heartbeat := &smartbftprotos.Message{ + Content: &smartbftprotos.Message_HeartBeat{ + HeartBeat: &smartbftprotos.HeartBeat{ + View: hm.view, + Seq: sequence, + }, + }, + } + hm.comm.BroadcastConsensus(heartbeat) + hm.lastHeartbeat = now +} + +func (hm *HeartbeatMonitor) followerTick(now time.Time) { + if hm.timedOut || hm.lastHeartbeat.IsZero() { + hm.lastHeartbeat = now + return + } + + delta := now.Sub(hm.lastHeartbeat) + if delta >= hm.hbTimeout { + hm.logger.Warnf("Heartbeat timeout (%v) from %d expired; last heartbeat was observed %s ago", + hm.hbTimeout, hm.leaderID, delta) + hm.handler.OnHeartbeatTimeout(hm.view, hm.leaderID) + hm.timedOut = true + return + } + + hm.logger.Debugf("Last heartbeat from %d was %v ago", hm.leaderID, delta) + + if !hm.followerBehind { + return + } + + hm.behindCounter++ + if hm.behindCounter >= hm.numOfTicksBehindBeforeSyncing { + hm.logger.Warnf("Syncing since the follower with seq %d is behind the leader for the last %d ticks", hm.behindSeq, hm.numOfTicksBehindBeforeSyncing) + hm.handler.Sync() + hm.behindCounter = 0 + return + } +} + +// HeartbeatWasSent tells the monitor to skip sending a heartbeat 
+func (hm *HeartbeatMonitor) HeartbeatWasSent() { + select { + case hm.sentHeartbeat <- struct{}{}: + default: + } +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/requestpool.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/requestpool.go new file mode 100644 index 00000000000..4a2b17b5be7 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/requestpool.go @@ -0,0 +1,439 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "container/list" + "context" + "fmt" + "sync" + "time" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + "github.com/pkg/errors" + "golang.org/x/sync/semaphore" +) + +const ( + defaultRequestTimeout = 10 * time.Second // for unit tests only +) + +//go:generate mockery -dir . -name RequestTimeoutHandler -case underscore -output ./mocks/ + +// RequestTimeoutHandler defines the methods called by request timeout timers created by time.AfterFunc. +// This interface is implemented by the bft.Controller. +type RequestTimeoutHandler interface { + + // OnRequestTimeout is called when a request timeout expires. + OnRequestTimeout(request []byte, requestInfo types.RequestInfo) + + // OnLeaderFwdRequestTimeout is called when a leader forwarding timeout expires. + OnLeaderFwdRequestTimeout(request []byte, requestInfo types.RequestInfo) + + // OnAutoRemoveTimeout is called when a auto-remove timeout expires. + OnAutoRemoveTimeout(requestInfo types.RequestInfo) +} + +// Pool implements requests pool, maintains pool of given size provided during +// construction. In case there are more incoming request than given size it will +// block during submit until there will be place to submit new ones. 
+type Pool struct { + logger api.Logger + inspector api.RequestInspector + options PoolOptions + + lock sync.Mutex + fifo *list.List + semaphore *semaphore.Weighted + existMap map[types.RequestInfo]*list.Element + timeoutHandler RequestTimeoutHandler + closed bool + stopped bool + submittedChan chan struct{} + sizeBytes uint64 +} + +// requestItem captures request related information +type requestItem struct { + request []byte + timeout *time.Timer +} + +// PoolOptions is the pool configuration +type PoolOptions struct { + QueueSize int64 + ForwardTimeout time.Duration + ComplainTimeout time.Duration + AutoRemoveTimeout time.Duration +} + +// NewPool constructs new requests pool +func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool { + if options.ForwardTimeout == 0 { + options.ForwardTimeout = defaultRequestTimeout + } + if options.ComplainTimeout == 0 { + options.ComplainTimeout = defaultRequestTimeout + } + if options.AutoRemoveTimeout == 0 { + options.AutoRemoveTimeout = defaultRequestTimeout + } + + return &Pool{ + timeoutHandler: th, + logger: log, + inspector: inspector, + fifo: list.New(), + semaphore: semaphore.NewWeighted(options.QueueSize), + existMap: make(map[types.RequestInfo]*list.Element), + options: options, + submittedChan: submittedChan, + } +} + +// ChangeTimeouts changes the timeout of the pool +func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) { + rp.lock.Lock() + defer rp.lock.Unlock() + + if !rp.stopped { + rp.logger.Errorf("Trying to change timeouts but the pool is not stopped") + return + } + + if options.ForwardTimeout == 0 { + options.ForwardTimeout = defaultRequestTimeout + } + if options.ComplainTimeout == 0 { + options.ComplainTimeout = defaultRequestTimeout + } + if options.AutoRemoveTimeout == 0 { + options.AutoRemoveTimeout = defaultRequestTimeout + } + + rp.options.ForwardTimeout = options.ForwardTimeout + 
rp.options.ComplainTimeout = options.ComplainTimeout + rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout + + rp.timeoutHandler = th + + rp.logger.Debugf("Changed pool timeouts") +} + +func (rp *Pool) isClosed() bool { + rp.lock.Lock() + defer rp.lock.Unlock() + + return rp.closed +} + +// Submit a request into the pool, returns an error when request is already in the pool +func (rp *Pool) Submit(request []byte) error { + reqInfo := rp.inspector.RequestID(request) + if rp.isClosed() { + return errors.Errorf("pool closed, request rejected: %s", reqInfo) + } + + // do not wait for a semaphore with a lock, as it will prevent draining the pool. + if err := rp.semaphore.Acquire(context.Background(), 1); err != nil { + return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo) + } + + reqCopy := append(make([]byte, 0), request...) + + rp.lock.Lock() + defer rp.lock.Unlock() + + if _, exist := rp.existMap[reqInfo]; exist { + rp.semaphore.Release(1) + errStr := fmt.Sprintf("request %s already exists in the pool", reqInfo) + rp.logger.Debugf(errStr) + return errors.New(errStr) + } + + to := time.AfterFunc( + rp.options.ForwardTimeout, + func() { rp.onRequestTO(reqCopy, reqInfo) }, + ) + if rp.stopped { + rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo) + to.Stop() + } + reqItem := &requestItem{ + request: reqCopy, + timeout: to, + } + + element := rp.fifo.PushBack(reqItem) + rp.existMap[reqInfo] = element + + if len(rp.existMap) != rp.fifo.Len() { + rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) + } + + rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout) + + // notify that a request was submitted + select { + case rp.submittedChan <- struct{}{}: + default: + } + + rp.sizeBytes += uint64(len(element.Value.(*requestItem).request)) + + return nil +} + +// Size returns the number of requests currently 
residing the pool +func (rp *Pool) Size() int { + rp.lock.Lock() + defer rp.lock.Unlock() + + return len(rp.existMap) +} + +// NextRequests returns the next requests to be batched. +// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice. +// Return variable full indicates that the batch cannot be increased further by calling again with the same arguments. +func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) { + rp.lock.Lock() + defer rp.lock.Unlock() + + if check { + if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) { + return nil, false + } + } + + count := minInt(rp.fifo.Len(), maxCount) + var totalSize uint64 + batch = make([][]byte, 0, count) + var element = rp.fifo.Front() + for i := 0; i < count; i++ { + req := element.Value.(*requestItem).request + reqLen := uint64(len(req)) + if totalSize+reqLen > maxSizeBytes { + rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB", + len(batch), totalSize, maxSizeBytes) + return batch, true + } + batch = append(batch, req) + totalSize = totalSize + reqLen + element = element.Next() + } + + fullS := totalSize >= maxSizeBytes + fullC := len(batch) == maxCount + full = fullS || fullC + if len(batch) > 0 { + rp.logger.Debugf("Returning batch of %d requests totalling %dB", + len(batch), totalSize) + } + return batch, full +} + +// Prune removes requests for which the given predicate returns error. 
+func (rp *Pool) Prune(predicate func([]byte) error) { + reqVec, infoVec := rp.copyRequests() + + var numPruned int + for i, req := range reqVec { + err := predicate(req) + if err == nil { + continue + } + + if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil { + rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr) + } else { + rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err) + numPruned++ + } + } + + rp.logger.Debugf("Pruned %d requests", numPruned) +} + +func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) { + rp.lock.Lock() + defer rp.lock.Unlock() + + requestVec = make([][]byte, len(rp.existMap)) + infoVec = make([]types.RequestInfo, len(rp.existMap)) + + var i int + for info, item := range rp.existMap { + infoVec[i] = info + requestVec[i] = item.Value.(*requestItem).request + i++ + } + + return +} + +// RemoveRequest removes the given request from the pool +func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error { + rp.lock.Lock() + defer rp.lock.Unlock() + + element, exist := rp.existMap[requestInfo] + if !exist { + errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo) + rp.logger.Debugf(errStr) + return fmt.Errorf(errStr) + } + + rp.deleteRequest(element, requestInfo) + rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request)) + return nil +} + +func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) { + item := element.Value.(*requestItem) + item.timeout.Stop() + + rp.fifo.Remove(element) + delete(rp.existMap, requestInfo) + rp.logger.Infof("Removed request %s from request pool", requestInfo) + rp.semaphore.Release(1) + + if len(rp.existMap) != rp.fifo.Len() { + rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) + } +} + +// Close removes all the requests, stops all the timeout timers. 
+func (rp *Pool) Close() { + rp.lock.Lock() + defer rp.lock.Unlock() + + rp.closed = true + + for requestInfo, element := range rp.existMap { + rp.deleteRequest(element, requestInfo) + } +} + +// StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped". +// This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running +// at the time of the call to StopTimers(). +func (rp *Pool) StopTimers() { + rp.lock.Lock() + defer rp.lock.Unlock() + + rp.stopped = true + + for _, element := range rp.existMap { + item := element.Value.(*requestItem) + item.timeout.Stop() + } + + rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap)) +} + +// RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows +// submission of new requests. +func (rp *Pool) RestartTimers() { + rp.lock.Lock() + defer rp.lock.Unlock() + + rp.stopped = false + + for reqInfo, element := range rp.existMap { + item := element.Value.(*requestItem) + item.timeout.Stop() + to := time.AfterFunc( + rp.options.ForwardTimeout, + func() { rp.onRequestTO(item.request, reqInfo) }, + ) + item.timeout = to + } + + rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap)) +} + +func (rp *Pool) contains(reqInfo types.RequestInfo) bool { + rp.lock.Lock() + defer rp.lock.Unlock() + _, contains := rp.existMap[reqInfo] + return contains +} + +// called by the goroutine spawned by time.AfterFunc +func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) { + if !rp.contains(reqInfo) { + return + } + // may take time, in case Comm channel to leader is full; hence w/o the lock. 
+ rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo) + rp.timeoutHandler.OnRequestTimeout(request, reqInfo) + + rp.lock.Lock() + defer rp.lock.Unlock() + + element, contains := rp.existMap[reqInfo] + if !contains { + rp.logger.Debugf("Request %s no longer in pool", reqInfo) + return + } + + if rp.closed || rp.stopped { + rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout") + return + } + + //start a second timeout + item := element.Value.(*requestItem) + item.timeout = time.AfterFunc( + rp.options.ComplainTimeout, + func() { rp.onLeaderFwdRequestTO(request, reqInfo) }, + ) + rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout) +} + +// called by the goroutine spawned by time.AfterFunc +func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) { + if !rp.contains(reqInfo) { + return + } + // may take time, in case Comm channel is full; hence w/o the lock. + rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo) + rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo) + + rp.lock.Lock() + defer rp.lock.Unlock() + + element, contains := rp.existMap[reqInfo] + if !contains { + rp.logger.Debugf("Request %s no longer in pool", reqInfo) + return + } + + if rp.closed || rp.stopped { + rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout") + return + } + + //start a third timeout + item := element.Value.(*requestItem) + item.timeout = time.AfterFunc( + rp.options.AutoRemoveTimeout, + func() { rp.onAutoRemoveTO(reqInfo) }, + ) + rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout) +} + +// called by the goroutine spawned by time.AfterFunc +func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo) { + rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo) + if err := 
rp.RemoveRequest(reqInfo); err != nil { + rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err) + return + } + rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo) + return +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/sched.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/sched.go new file mode 100644 index 00000000000..0e444bce201 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/sched.go @@ -0,0 +1,249 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "container/heap" + "sync" + "sync/atomic" + "time" +) + +type Stopper interface { + Stop() +} + +type Task struct { + Deadline time.Time + F func() + stopped uint32 +} + +func (t *Task) Stop() { + atomic.StoreUint32(&t.stopped, 1) +} + +func (t *Task) isStopped() bool { + return atomic.LoadUint32(&t.stopped) == uint32(1) +} + +type backingHeap []*Task + +func (h backingHeap) Len() int { + return len(h) +} + +func (h backingHeap) Less(i, j int) bool { + return h[i].Deadline.Before(h[j].Deadline) +} +func (h backingHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *backingHeap) Push(o interface{}) { + t := o.(*Task) + *h = append(*h, t) +} + +func (h *backingHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +type TaskQueue struct { + h *backingHeap +} + +func NewTaskQueue() *TaskQueue { + return &TaskQueue{h: &backingHeap{}} +} + +func (q *TaskQueue) Enqueue(t *Task) { + heap.Push(q.h, t) +} + +func (q *TaskQueue) DeQueue() *Task { + if len(*q.h) == 0 { + return nil + } + return heap.Pop(q.h).(*Task) +} + +func (q *TaskQueue) Top() *Task { + if len(*q.h) == 0 { + return nil + } + return (*q.h)[0] +} + +func (q TaskQueue) Size() int { + return q.h.Len() +} + +type cmd struct { + timeout time.Duration + t *Task +} + +type Scheduler struct { + exec *executor + currentTime time.Time + queue *TaskQueue + 
signalChan chan struct{} + cmdChan chan cmd + timeChan <-chan time.Time + stopChan chan struct{} + running sync.WaitGroup +} + +func NewScheduler(timeChan <-chan time.Time) *Scheduler { + s := &Scheduler{ + queue: NewTaskQueue(), + timeChan: timeChan, + signalChan: make(chan struct{}), + cmdChan: make(chan cmd), + stopChan: make(chan struct{}), + } + + s.exec = &executor{ + running: &s.running, + queue: make(chan func(), 1), + stopChan: s.stopChan, + } + + return s +} + +func (s *Scheduler) Start() { + s.running.Add(2) + + go s.exec.run() + go s.run() +} + +func (s *Scheduler) Stop() { + select { + case <-s.stopChan: + return + default: + + } + defer s.running.Wait() + close(s.stopChan) +} + +func (s *Scheduler) Schedule(timeout time.Duration, f func()) Stopper { + task := &Task{F: f} + s.cmdChan <- cmd{ + t: task, + timeout: timeout, + } + return task +} + +func (s *Scheduler) run() { + defer s.running.Done() + + s.waitForFirstTick() + + for { + select { + case <-s.stopChan: + return + case now := <-s.timeChan: + if s.currentTime.After(now) { + continue + } + s.currentTime = now + s.tick() + case <-s.signalChan: + s.tick() + case cmd := <-s.cmdChan: + task := cmd.t + task.Deadline = s.currentTime.Add(cmd.timeout) + s.queue.Enqueue(task) + } + } +} + +func (s *Scheduler) waitForFirstTick() { + select { + case s.currentTime = <-s.timeChan: + case <-s.stopChan: + } +} + +func (s *Scheduler) tick() { + for { + executedSomeTask := s.checkAndExecute() + // If we executed some Task, we can try to execute the next Task if + // such a Task is ready to be executed. + if !executedSomeTask { + return + } + } +} + +// checkAndExecute checks if there is an executable Task, +// and if so then executes it. +// Returns true if executed a Task, else false. +func (s *Scheduler) checkAndExecute() bool { + if s.queue.Size() == 0 { + return false + } + + // Should earliest deadline Task be scheduled? 
+ if s.queue.Top().Deadline.After(s.currentTime) { + return false + } + + task := s.queue.DeQueue() + + f := func() { + if task.isStopped() { + return + } + task.F() + select { + case s.signalChan <- struct{}{}: + case <-s.exec.stopChan: + return + } + + } + + // Check if there is room in the executor queue by trying to enqueue into it. + select { + case s.exec.queue <- f: + // We succeeded in enqueueing, nothing to do. + return true + default: + // We couldn't enqueue to the executor, so re-add the Task. + s.queue.Enqueue(task) + return false + } +} + +type executor struct { + running *sync.WaitGroup + stopChan chan struct{} + queue chan func() // 1 capacity channel +} + +func (e *executor) run() { + defer e.running.Done() + for { + select { + case <-e.stopChan: + return + case f := <-e.queue: + f() + } + } +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/state.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/state.go new file mode 100644 index 00000000000..f3cc21b0bb1 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/state.go @@ -0,0 +1,247 @@ +// Copyright IBM Corp. All Rights Reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "fmt" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" +) + +type StateRecorder struct { + SavedMessages []*protos.SavedMessage +} + +func (sr *StateRecorder) Save(message *protos.SavedMessage) error { + sr.SavedMessages = append(sr.SavedMessages, message) + return nil +} + +func (*StateRecorder) Restore(_ *View) error { + panic("should not be used") +} + +type PersistedState struct { + InFlightProposal *InFlightData + Entries [][]byte + Logger api.Logger + WAL api.WriteAheadLog +} + +func (ps *PersistedState) Save(msgToSave *protos.SavedMessage) error { + if proposed := msgToSave.GetProposedRecord(); proposed != nil { + ps.storeProposal(proposed) + } + if prepared := msgToSave.GetCommit(); prepared != nil { + ps.storePrepared(prepared) + } + + b, err := proto.Marshal(msgToSave) + if err != nil { + ps.Logger.Panicf("Failed marshaling message: %v", err) + } + // It is only safe to truncate if we either: + // + // 1) Process a pre-prepare, because it means we safely persisted the + // previous proposal and have a stable checkpoint. + // 2) Acquired a new view message which contains 2f+1 attestations + // of the cluster agreeing to a new view configuration. 
+ newProposal := msgToSave.GetProposedRecord() != nil + // TODO: handle view message here as well, and add "|| finalizedView" to truncate flag + return ps.WAL.Append(b, newProposal) +} + +func (ps *PersistedState) storeProposal(proposed *protos.ProposedRecord) { + proposal := proposed.PrePrepare.Proposal + proposalToStore := types.Proposal{ + VerificationSequence: int64(proposal.VerificationSequence), + Header: proposal.Header, + Payload: proposal.Payload, + Metadata: proposal.Metadata, + } + ps.InFlightProposal.StoreProposal(proposalToStore) +} + +func (ps *PersistedState) storePrepared(commitMsg *protos.Message) { + cmt := commitMsg.GetCommit() + ps.InFlightProposal.StorePrepares(cmt.View, cmt.Seq) +} + +func (ps *PersistedState) LoadNewViewIfApplicable() (*types.ViewAndSeq, error) { + entries := ps.Entries + if len(entries) == 0 { + ps.Logger.Infof("Nothing to restore") + return nil, nil + } + lastEntry := entries[len(entries)-1] + lastPersistedMessage := &protos.SavedMessage{} + if err := proto.Unmarshal(lastEntry, lastPersistedMessage); err != nil { + ps.Logger.Errorf("Failed unmarshaling last entry from WAL: %v", err) + return nil, errors.Wrap(err, "failed unmarshaling last entry from WAL") + } + if newViewMsg := lastPersistedMessage.GetNewView(); newViewMsg != nil { + ps.Logger.Infof("last entry in WAL is a newView record") + return &types.ViewAndSeq{View: newViewMsg.ViewId, Seq: newViewMsg.LatestSequence}, nil + } + return nil, nil +} + +func (ps *PersistedState) LoadViewChangeIfApplicable() (*protos.ViewChange, error) { + entries := ps.Entries + if len(entries) == 0 { + ps.Logger.Infof("Nothing to restore") + return nil, nil + } + lastEntry := entries[len(entries)-1] + lastPersistedMessage := &protos.SavedMessage{} + if err := proto.Unmarshal(lastEntry, lastPersistedMessage); err != nil { + ps.Logger.Errorf("Failed unmarshaling last entry from WAL: %v", err) + return nil, errors.Wrap(err, "failed unmarshaling last entry from WAL") + } + if viewChangeMsg := 
lastPersistedMessage.GetViewChange(); viewChangeMsg != nil { + ps.Logger.Infof("last entry in WAL is a viewChange message") + return viewChangeMsg, nil + } + return nil, nil +} + +func (ps *PersistedState) Restore(v *View) error { + // Unless we conclude otherwise, we're in a COMMITTED state + v.Phase = COMMITTED + + entries := ps.Entries + if len(entries) == 0 { + ps.Logger.Infof("Nothing to restore") + return nil + } + + ps.Logger.Infof("WAL contains %d entries", len(entries)) + + lastEntry := entries[len(entries)-1] + lastPersistedMessage := &protos.SavedMessage{} + if err := proto.Unmarshal(lastEntry, lastPersistedMessage); err != nil { + ps.Logger.Errorf("Failed unmarshaling last entry from WAL: %v", err) + return errors.Wrap(err, "failed unmarshaling last entry from WAL") + } + + if proposed := lastPersistedMessage.GetProposedRecord(); proposed != nil { + return ps.recoverProposed(proposed, v) + } + + if commitMsg := lastPersistedMessage.GetCommit(); commitMsg != nil { + return ps.recoverPrepared(commitMsg, v, entries) + } + + if newViewMsg := lastPersistedMessage.GetNewView(); newViewMsg != nil { + ps.Logger.Infof("last entry in WAL is a newView record") + return nil + } + + if viewChangeMsg := lastPersistedMessage.GetViewChange(); viewChangeMsg != nil { + ps.Logger.Infof("last entry in WAL is a viewChange message") + return nil + } + + return errors.Errorf("unrecognized record: %v", lastPersistedMessage) +} + +func (ps *PersistedState) recoverProposed(lastPersistedMessage *protos.ProposedRecord, v *View) error { + prop := lastPersistedMessage.GetPrePrepare().Proposal + v.inFlightProposal = &types.Proposal{ + VerificationSequence: int64(prop.VerificationSequence), + Metadata: prop.Metadata, + Payload: prop.Payload, + Header: prop.Header, + } + ps.storeProposal(lastPersistedMessage) + // Reconstruct the prepare message we shall next broadcast + // after the recovery. 
+ prp := lastPersistedMessage.GetPrePrepare() + v.lastBroadcastSent = &protos.Message{ + Content: &protos.Message_Prepare{ + Prepare: lastPersistedMessage.GetPrepare(), + }, + } + v.Phase = PROPOSED + v.Number = prp.View + v.ProposalSequence = prp.Seq + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(prop.Metadata, md); err != nil { + ps.Logger.Panicf("Failed to unmarshal proposal metadata, error: %v", err) + } + v.DecisionsInView = md.DecisionsInView + ps.Logger.Infof("Restored proposal with sequence %d", lastPersistedMessage.GetPrePrepare().Seq) + return nil +} + +func (ps *PersistedState) recoverPrepared(lastPersistedMessage *protos.Message, v *View, entries [][]byte) error { + // Last entry is a commit, so we should have not pruned the previous pre-prepare + if len(entries) < 2 { + return fmt.Errorf("last message is a commit, but expected to also have a matching pre-prepare") + } + prePrepareMsg := &protos.SavedMessage{} + if err := proto.Unmarshal(entries[len(entries)-2], prePrepareMsg); err != nil { + ps.Logger.Errorf("Failed unmarshaling second last entry from WAL: %v", err) + return errors.Wrap(err, "failed unmarshaling last entry from WAL") + } + + prePrepareFromWAL := prePrepareMsg.GetProposedRecord().GetPrePrepare() + + if prePrepareFromWAL == nil { + return fmt.Errorf("expected second last message to be a pre-prepare, but got '%v' instead", prePrepareMsg) + } + + if v.ProposalSequence < prePrepareFromWAL.Seq { + err := fmt.Errorf("last proposal sequence persisted into WAL is %d which is greater than last committed sequence is %d", prePrepareFromWAL.Seq, v.ProposalSequence) + ps.Logger.Errorf("Failed recovery: %s", err) + return err + } + + // Check if the WAL's last sequence has been persisted into the application layer. 
+ if v.ProposalSequence > prePrepareFromWAL.Seq { + ps.Logger.Infof("Last proposal with sequence %d has been safely committed", v.ProposalSequence) + return nil + } + + // Else, v.ProposalSequence == prePrepareFromWAL.Seq + + prop := prePrepareFromWAL.Proposal + v.inFlightProposal = &types.Proposal{ + VerificationSequence: int64(prop.VerificationSequence), + Metadata: prop.Metadata, + Payload: prop.Payload, + Header: prop.Header, + } + ps.storeProposal(prePrepareMsg.GetProposedRecord()) + ps.storePrepared(lastPersistedMessage) + + // Reconstruct the commit message we shall next broadcast + // after the recovery. + v.lastBroadcastSent = lastPersistedMessage + v.Phase = PREPARED + v.Number = prePrepareFromWAL.View + v.ProposalSequence = prePrepareFromWAL.Seq + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(prop.Metadata, md); err != nil { + ps.Logger.Panicf("Failed to unmarshal proposal metadata, error: %v", err) + } + v.DecisionsInView = md.DecisionsInView + + // Restore signature + signatureInLastSentCommit := v.lastBroadcastSent.GetCommit().Signature + v.myProposalSig = &types.Signature{ + ID: signatureInLastSentCommit.Signer, + Msg: signatureInLastSentCommit.Msg, + Value: signatureInLastSentCommit.Value, + } + + ps.Logger.Infof("Restored proposal with sequence %d", prePrepareFromWAL.Seq) + return nil +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/statecollector.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/statecollector.go new file mode 100644 index 00000000000..d5a4e8e3a06 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/statecollector.go @@ -0,0 +1,153 @@ +// Copyright IBM Corp. All Rights Reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "sync" + "time" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" +) + +// StateCollector collects the current state from other nodes +type StateCollector struct { + SelfID uint64 + N uint64 + f int + quorum int + + Logger api.Logger + + incMsgs chan *incMsg + + CollectTimeout time.Duration + + responses *voteSet + + stopOnce sync.Once + stopChan chan struct{} +} + +// Start starts the state collector +func (s *StateCollector) Start() { + s.incMsgs = make(chan *incMsg, s.N) + s.quorum, s.f = computeQuorum(s.N) + s.stopChan = make(chan struct{}) + s.stopOnce = sync.Once{} + + acceptResponse := func(_ uint64, message *protos.Message) bool { + return message.GetStateTransferResponse() != nil + } + s.responses = &voteSet{ + validVote: acceptResponse, + } + s.responses.clear(s.N) +} + +// HandleMessage handles messages addressed to the state collector +func (s *StateCollector) HandleMessage(sender uint64, m *protos.Message) { + if m.GetStateTransferResponse() == nil { + s.Logger.Panicf("Node %d handling a message which is not a response", s.SelfID) + } + msg := &incMsg{sender: sender, Message: m} + s.Logger.Debugf("Node %d handling state response: %v", s.SelfID, msg) + select { + case <-s.stopChan: + return + case s.incMsgs <- msg: + default: // if incMsgs is full do nothing + s.Logger.Debugf("Node %d reached default in handling state response: %v", s.SelfID, msg) + } +} + +// ClearCollected clears the responses collected by the state collector +func (s *StateCollector) ClearCollected() { + // drain message channel + for len(s.incMsgs) > 0 { + <-s.incMsgs + } +} + +// CollectStateResponses returns a valid response, or nil if the timeout was reached +func (s *StateCollector) CollectStateResponses() *types.ViewAndSeq { + s.responses.clear(s.N) + + timer := time.NewTimer(s.CollectTimeout) + defer 
timer.Stop() + + s.Logger.Debugf("Node %d started collecting state responses", s.SelfID) + + for { + select { + case <-s.stopChan: + return nil + case <-timer.C: + s.Logger.Infof("Node %d reached the state collector timeout", s.SelfID) + return nil + case msg := <-s.incMsgs: + s.Logger.Debugf("Node %d collected a response: %v", s.SelfID, msg) + s.responses.registerVote(msg.sender, msg.Message) + if viewAndSeq := s.collectedEnoughEqualVotes(); viewAndSeq != nil { + s.Logger.Infof("Node %d collected a valid state: view - %d and seq - %d", s.SelfID, viewAndSeq.View, viewAndSeq.Seq) + return viewAndSeq + } + } + } + +} + +func (s *StateCollector) collectedEnoughEqualVotes() *types.ViewAndSeq { + if len(s.responses.voted) <= s.f { + return nil + } + votesMap := make(map[types.ViewAndSeq]uint64) + num := len(s.responses.votes) + for i := 0; i < num; i++ { + vote := <-s.responses.votes + response := vote.GetStateTransferResponse() + if response == nil { + s.Logger.Panicf("Node %d collected a message which is not a response", s.SelfID) + return nil + } + viewAndSeq := types.ViewAndSeq{ + View: response.ViewNum, + Seq: response.Sequence, + } + s.Logger.Debugf("Node %d collected a responses with view - %d and seq - %d", s.SelfID, viewAndSeq.View, viewAndSeq.Seq) + s.responses.votes <- vote + if _, exist := votesMap[viewAndSeq]; exist { + votesMap[viewAndSeq]++ + } else { + votesMap[viewAndSeq] = 1 + } + } + for viewAndSeq, count := range votesMap { + if count > uint64(s.f) { + return &viewAndSeq + } + } + return nil +} + +func (s *StateCollector) close() { + s.stopOnce.Do( + func() { + select { + case <-s.stopChan: + return + default: + close(s.stopChan) + } + }, + ) +} + +// Stop the state collector +func (s *StateCollector) Stop() { + s.close() +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/support.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/support.go new file mode 100644 index 00000000000..d881e85e5ca --- /dev/null +++ 
b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/support.go @@ -0,0 +1,62 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/smartbftprotos" +) + +// Generate mocks for a collection of interfaces that are defined in api/dependencies.go + +// VerifierMock mock for the Verifier interface +//go:generate mockery -dir . -name VerifierMock -case underscore -output ./mocks/ +type VerifierMock interface { + api.Verifier +} + +// AssemblerMock mock for the Assembler interface +//go:generate mockery -dir . -name AssemblerMock -case underscore -output ./mocks/ +type AssemblerMock interface { + api.Assembler +} + +// ApplicationMock mock for the Application interface +//go:generate mockery -dir . -name ApplicationMock -case underscore -output ./mocks/ +type ApplicationMock interface { + api.Application +} + +// CommMock mock for the Comm interface +//go:generate mockery -dir . -name CommMock -case underscore -output ./mocks/ +type CommMock interface { + api.Comm + BroadcastConsensus(m *smartbftprotos.Message) +} + +// SynchronizerMock mock for the Synchronizer interface +//go:generate mockery -dir . -name SynchronizerMock -case underscore -output ./mocks/ +type SynchronizerMock interface { + api.Synchronizer +} + +// SignerMock mock for the Signer interface +//go:generate mockery -dir . -name SignerMock -case underscore -output ./mocks/ +type SignerMock interface { + api.Signer +} + +// MembershipNotifierMock mock for the MembershipNotifier interface +//go:generate mockery -dir . -name MembershipNotifierMock -case underscore -output ./mocks/ +type MembershipNotifierMock interface { + api.MembershipNotifier +} + +// Synchronizer mock for the Synchronizer interface (no return value) +//go:generate mockery -dir . 
-name Synchronizer -case underscore -output ./mocks/ +type Synchronizer interface { + Sync() +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/util.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/util.go new file mode 100644 index 00000000000..071a675d888 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/util.go @@ -0,0 +1,520 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "bytes" + "crypto/sha256" + "encoding/asn1" + "encoding/base64" + "fmt" + "math" + "sync" + "sync/atomic" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" +) + +type proposalInfo struct { + digest string + view uint64 + seq uint64 +} + +func viewNumber(m *protos.Message) uint64 { + if pp := m.GetPrePrepare(); pp != nil { + return pp.GetView() + } + + if prp := m.GetPrepare(); prp != nil { + return prp.GetView() + } + + if cmt := m.GetCommit(); cmt != nil { + return cmt.GetView() + } + + return math.MaxUint64 +} + +func proposalSequence(m *protos.Message) uint64 { + if pp := m.GetPrePrepare(); pp != nil { + return pp.Seq + } + + if prp := m.GetPrepare(); prp != nil { + return prp.Seq + } + + if cmt := m.GetCommit(); cmt != nil { + return cmt.Seq + } + + return math.MaxUint64 +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +// MarshalOrPanic marshals or panics when an error occurs +func MarshalOrPanic(msg proto.Message) []byte { + b, err := proto.Marshal(msg) + if err != nil { + panic(err) + } + return b +} + +func getLeaderID( + view uint64, + N uint64, + nodes []uint64, + leaderRotation bool, + decisionsInView uint64, + decisionsPerLeader uint64, + blacklist []uint64, +) uint64 { + blackListed := make(map[uint64]struct{}) + for _, n := range blacklist { + blackListed[n] = struct{}{} + } + + if 
!leaderRotation { + return nodes[view%N] + } + + for i := 0; i < len(nodes); i++ { + index := (view + (decisionsInView / decisionsPerLeader)) + uint64(i) + node := nodes[index%N] + _, exists := blackListed[node] + if !exists { + return node + } + } + + panic(fmt.Sprintf("all %d nodes are blacklisted", len(nodes))) +} + +type vote struct { + *protos.Message + sender uint64 +} + +type voteSet struct { + validVote func(voter uint64, message *protos.Message) bool + voted map[uint64]struct{} + votes chan *vote +} + +func (vs *voteSet) clear(n uint64) { + // Drain the votes channel + for len(vs.votes) > 0 { + <-vs.votes + } + + vs.voted = make(map[uint64]struct{}, n) + vs.votes = make(chan *vote, n) +} + +func (vs *voteSet) registerVote(voter uint64, message *protos.Message) { + if !vs.validVote(voter, message) { + return + } + + _, hasVoted := vs.voted[voter] + if hasVoted { + // Received double vote + return + } + + vs.voted[voter] = struct{}{} + vs.votes <- &vote{Message: message, sender: voter} +} + +type incMsg struct { + *protos.Message + sender uint64 +} + +// computeQuorum calculates the quorum size Q, given a cluster size N. +// +// The calculation satisfies the following: +// Given a cluster size of N nodes, which tolerates f failures according to: +// f = argmax ( N >= 3f+1 ) +// Q is the size of the quorum such that: +// any two subsets q1, q2 of size Q, intersect in at least f+1 nodes. +// +// Note that this is different from N-f (the number of correct nodes), when N=3f+3. That is, we have two extra nodes +// above the minimum required to tolerate f failures. +func computeQuorum(N uint64) (Q int, F int) { + F = int((int(N) - 1) / 3) // F = floor((N-1)/3), the maximum number of tolerated failures + Q = int(math.Ceil((float64(N) + float64(F) + 1) / 2.0)) // Q = ceil((N+F+1)/2) + return +} + +// InFlightData records proposals that are in-flight, +// as well as their corresponding prepares. 
+type InFlightData struct { + v atomic.Value +} + +type inFlightProposalData struct { + proposal *types.Proposal + prepared bool +} + +// InFlightProposal returns an in-flight proposal or nil if there is no such. +func (ifp *InFlightData) InFlightProposal() *types.Proposal { + fetched := ifp.v.Load() + if fetched == nil { + return nil + } + + data := fetched.(inFlightProposalData) + return data.proposal +} + +// IsInFlightPrepared returns true if the in-flight proposal is prepared. +func (ifp *InFlightData) IsInFlightPrepared() bool { + fetched := ifp.v.Load() + if fetched == nil { + return false + } + data := fetched.(inFlightProposalData) + return data.prepared +} + +// StoreProposal stores an in-flight proposal. +func (ifp *InFlightData) StoreProposal(prop types.Proposal) { + p := prop + ifp.v.Store(inFlightProposalData{proposal: &p}) +} + +// StorePrepares stores alongside the already stored in-flight proposal that it is prepared. +func (ifp *InFlightData) StorePrepares(view, seq uint64) { + prop := ifp.InFlightProposal() + if prop == nil { + panic("stored prepares but proposal is not initialized") + } + p := prop + ifp.v.Store(inFlightProposalData{proposal: p, prepared: true}) +} + +// ProposalMaker implements ProposerBuilder +type ProposalMaker struct { + DecisionsPerLeader uint64 + N uint64 + SelfID uint64 + Decider Decider + FailureDetector FailureDetector + Sync Synchronizer + Logger api.Logger + Comm Comm + Verifier api.Verifier + Signer api.Signer + MembershipNotifier api.MembershipNotifier + State State + InMsqQSize int + ViewSequences *atomic.Value + restoreOnceFromWAL sync.Once + Checkpoint *types.Checkpoint +} + +// NewProposer returns a new view +func (pm *ProposalMaker) NewProposer(leader, proposalSequence, viewNum, decisionsInView uint64, quorumSize int) Proposer { + view := &View{ + RetrieveCheckpoint: pm.Checkpoint.Get, + DecisionsPerLeader: pm.DecisionsPerLeader, + N: pm.N, + LeaderID: leader, + SelfID: pm.SelfID, + Quorum: quorumSize, + 
Number: viewNum, + Decider: pm.Decider, + FailureDetector: pm.FailureDetector, + Sync: pm.Sync, + Logger: pm.Logger, + Comm: pm.Comm, + Verifier: pm.Verifier, + Signer: pm.Signer, + MembershipNotifier: pm.MembershipNotifier, + ProposalSequence: proposalSequence, + DecisionsInView: decisionsInView, + State: pm.State, + InMsgQSize: pm.InMsqQSize, + ViewSequences: pm.ViewSequences, + } + + view.ViewSequences.Store(ViewSequence{ + ViewActive: true, + ProposalSeq: proposalSequence, + }) + + pm.restoreOnceFromWAL.Do(func() { + err := pm.State.Restore(view) + if err != nil { + pm.Logger.Panicf("Failed restoring view from WAL: %v", err) + } + }) + + if proposalSequence > view.ProposalSequence { + view.ProposalSequence = proposalSequence + view.DecisionsInView = decisionsInView + } + + if viewNum > view.Number { + view.Number = viewNum + view.DecisionsInView = decisionsInView + } + + return view +} + +// ViewSequence indicates if a view is currently active and its current proposal sequence +type ViewSequence struct { + ViewActive bool + ProposalSeq uint64 +} + +// MsgToString converts a given message to a printable string +func MsgToString(m *protos.Message) string { + if m == nil { + return "empty message" + } + switch m.GetContent().(type) { + case *protos.Message_PrePrepare: + return prePrepareToString(m.GetPrePrepare()) + case *protos.Message_NewView: + return newViewToString(m.GetNewView()) + case *protos.Message_ViewData: + return signedViewDataToString(m.GetViewData()) + case *protos.Message_HeartBeat: + return heartBeatToString(m.GetHeartBeat()) + case *protos.Message_HeartBeatResponse: + return heartBeatResponseToString(m.GetHeartBeatResponse()) + default: + return m.String() + } +} + +func prePrepareToString(prp *protos.PrePrepare) string { + if prp == nil { + return "" + } + if prp.Proposal == nil { + return fmt.Sprintf("", prp.View, prp.Seq) + } + return fmt.Sprintf("", + prp.View, prp.Seq, len(prp.Proposal.Payload), 
base64.StdEncoding.EncodeToString(prp.Proposal.Header)) +} + +func newViewToString(nv *protos.NewView) string { + if nv == nil || nv.SignedViewData == nil { + return "" + } + buff := bytes.Buffer{} + buff.WriteString("< NewView with ") + for i, svd := range nv.SignedViewData { + buff.WriteString(signedViewDataToString(svd)) + if i == len(nv.SignedViewData)-1 { + break + } + buff.WriteString(", ") + } + buff.WriteString(">") + return buff.String() +} + +func signedViewDataToString(svd *protos.SignedViewData) string { + if svd == nil { + return "empty ViewData" + } + vd := &protos.ViewData{} + if err := proto.Unmarshal(svd.RawViewData, vd); err != nil { + return fmt.Sprintf("", svd.Signer) + } + + return fmt.Sprintf("", + svd.Signer, vd.NextView) +} + +func heartBeatToString(hb *protos.HeartBeat) string { + if hb == nil { + return "empty HeartBeat" + } + + return fmt.Sprintf(" bl.f { + bl.logger.Infof("Removing %d from %d sized blacklist due to size constraint", newBlacklist[0], len(newBlacklist)) + newBlacklist = newBlacklist[1:] + } + + if len(bl.prevMD.BlackList) != len(newBlacklist) { + bl.logger.Infof("Blacklist changed: %v --> %v", bl.prevMD.BlackList, newBlacklist) + } + + return newBlacklist +} + +// pruneBlacklist receives the previous blacklist, prepare acknowledgements from nodes, and returns +// the new blacklist such that a node that was observed by more than f observers is removed from the blacklist, +// and all nodes that no longer exist are also removed from the blacklist. 
+func pruneBlacklist(prevBlacklist []uint64, preparesFrom map[uint64]*protos.PreparesFrom, f int, nodes []uint64, logger api.Logger) []uint64 { + if len(prevBlacklist) == 0 { + logger.Debugf("Blacklist empty, nothing to prune") + return prevBlacklist + } + logger.Debugf("Pruning blacklist %v with %d acknowledgements, f=%d, n=%d", prevBlacklist, len(preparesFrom), f, len(nodes)) + // Build a set of all nodes + currentNodeIDs := make(map[uint64]struct{}) + for _, n := range nodes { + currentNodeIDs[n] = struct{}{} + } + + // For each sender of a prepare, count the number of commit signatures which acknowledge receiving a prepare from it. + nodeID2Acks := make(map[uint64]int) + for from, gotPrepareFrom := range preparesFrom { + logger.Debugf("%d observed prepares from %v", from, gotPrepareFrom) + for _, prepareSender := range gotPrepareFrom.Ids { + nodeID2Acks[prepareSender]++ + } + } + + var newBlackList []uint64 + for _, blackListedNode := range prevBlacklist { + // Purge nodes that were removed by a reconfiguration + if _, exists := currentNodeIDs[blackListedNode]; !exists { + logger.Infof("Node %d no longer exists, removing it from the blacklist", blackListedNode) + continue + } + + // Purge nodes that have enough attestations of being alive + observers := nodeID2Acks[blackListedNode] + if observers > f { + logger.Infof("Node %d was observed sending a prepare by %d nodes, removing it from blacklist", blackListedNode, observers) + continue + } + newBlackList = append(newBlackList, blackListedNode) + } + + return newBlackList +} + +func equalIntLists(a, b []uint64) bool { + if len(a) != len(b) { + return false + } + + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true +} + +func CommitSignaturesDigest(sigs []*protos.Signature) []byte { + if len(sigs) == 0 { + return nil + } + idb := IntDoubleBytes{} + for _, sig := range sigs { + s := IntDoubleByte{ + A: int64(sig.Signer), + B: sig.Value, + C: sig.Msg, + } + idb.A = 
append(idb.A, s) + } + + serializedSignatures, err := asn1.Marshal(idb) + if err != nil { + panic(fmt.Sprintf("failed serializing signatures: %v", err)) + } + + h := sha256.New() + h.Write(serializedSignatures) + return h.Sum(nil) +} + +type IntDoubleByte struct { + A int64 + B, C []byte +} + +type IntDoubleBytes struct { + A []IntDoubleByte +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/view.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/view.go new file mode 100644 index 00000000000..879194322ac --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/view.go @@ -0,0 +1,1032 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" +) + +// Phase indicates the status of the view +type Phase uint8 + +// These are the different phases +const ( + COMMITTED = iota + PROPOSED + PREPARED + ABORT +) + +// State can save and restore the state +//go:generate mockery -dir . -name State -case underscore -output ./mocks/ +type State interface { + // Save saves a message. + Save(message *protos.SavedMessage) error + + // Restore restores the given view to its latest state + // before a crash, if applicable. 
+ Restore(*View) error +} + +// Comm adds broadcast to the regular comm interface +type Comm interface { + api.Comm + BroadcastConsensus(m *protos.Message) +} + +type CheckpointRetriever func() (protos.Proposal, []*protos.Signature) + +// View is responsible for running the view protocol +type View struct { + // Configuration + DecisionsPerLeader uint64 + RetrieveCheckpoint CheckpointRetriever + SelfID uint64 + N uint64 + LeaderID uint64 + Quorum int + Number uint64 + Decider Decider + FailureDetector FailureDetector + Sync Synchronizer + Logger api.Logger + Comm Comm + Verifier api.Verifier + Signer api.Signer + MembershipNotifier api.MembershipNotifier + ProposalSequence uint64 + DecisionsInView uint64 + State State + Phase Phase + InMsgQSize int + // Runtime + lastVotedProposalByID map[uint64]protos.Commit + incMsgs chan *incMsg + myProposalSig *types.Signature + inFlightProposal *types.Proposal + inFlightRequests []types.RequestInfo + lastBroadcastSent *protos.Message + // Current sequence sent prepare and commit + currPrepareSent *protos.Message + currCommitSent *protos.Message + // Prev sequence sent prepare and commit + // to help lagging replicas catch up + prevPrepareSent *protos.Message + prevCommitSent *protos.Message + // Current proposal + prePrepare chan *protos.Message + prepares *voteSet + commits *voteSet + // Next proposal + nextPrePrepare chan *protos.Message + nextPrepares *voteSet + nextCommits *voteSet + + blacklistSupported bool + abortChan chan struct{} + stopOnce sync.Once + viewEnded sync.WaitGroup + + ViewSequences *atomic.Value +} + +// Start starts the view +func (v *View) Start() { + v.stopOnce = sync.Once{} + v.incMsgs = make(chan *incMsg, v.InMsgQSize) + v.abortChan = make(chan struct{}) + v.lastVotedProposalByID = make(map[uint64]protos.Commit) + v.viewEnded.Add(1) + + v.prePrepare = make(chan *protos.Message, 1) + v.nextPrePrepare = make(chan *protos.Message, 1) + + v.setupVotes() + + go func() { + v.run() + }() +} + +func (v 
*View) setupVotes() { + // Prepares + acceptPrepares := func(_ uint64, message *protos.Message) bool { + return message.GetPrepare() != nil + } + + v.prepares = &voteSet{ + validVote: acceptPrepares, + } + v.prepares.clear(v.N) + + v.nextPrepares = &voteSet{ + validVote: acceptPrepares, + } + v.nextPrepares.clear(v.N) + + // Commits + acceptCommits := func(sender uint64, message *protos.Message) bool { + commit := message.GetCommit() + if commit == nil { + return false + } + if commit.Signature == nil { + return false + } + // Sender needs to match the inner signature sender + return commit.Signature.Signer == sender + } + + v.commits = &voteSet{ + validVote: acceptCommits, + } + v.commits.clear(v.N) + + v.nextCommits = &voteSet{ + validVote: acceptCommits, + } + v.nextCommits.clear(v.N) +} + +// HandleMessage handles incoming messages +func (v *View) HandleMessage(sender uint64, m *protos.Message) { + msg := &incMsg{sender: sender, Message: m} + select { + case <-v.abortChan: + return + case v.incMsgs <- msg: + } +} + +func (v *View) processMsg(sender uint64, m *protos.Message) { + if v.stopped() { + return + } + // Ensure view number is equal to our view + msgViewNum := viewNumber(m) + msgProposalSeq := proposalSequence(m) + + if msgViewNum != v.Number { + v.Logger.Warnf("%d got message %v from %d of view %d, expected view %d", v.SelfID, m, sender, msgViewNum, v.Number) + if sender != v.LeaderID { + v.discoverIfSyncNeeded(sender, m) + return + } + v.FailureDetector.Complain(v.Number, false) + // Else, we got a message with a wrong view from the leader. 
+ if msgViewNum > v.Number { + v.Sync.Sync() + } + v.stop() + return + } + + if msgProposalSeq == v.ProposalSequence-1 && v.ProposalSequence > 0 { + v.handlePrevSeqMessage(msgProposalSeq, sender, m) + return + } + + v.Logger.Debugf("%d got message %s from %d with seq %d", v.SelfID, MsgToString(m), sender, msgProposalSeq) + // This message is either for this proposal or the next one (we might be behind the rest) + if msgProposalSeq != v.ProposalSequence && msgProposalSeq != v.ProposalSequence+1 { + v.Logger.Warnf("%d got message from %d with sequence %d but our sequence is %d", v.SelfID, sender, msgProposalSeq, v.ProposalSequence) + v.discoverIfSyncNeeded(sender, m) + return + } + + msgForNextProposal := msgProposalSeq == v.ProposalSequence+1 + + if pp := m.GetPrePrepare(); pp != nil { + v.processPrePrepare(pp, m, msgForNextProposal, sender) + return + } + + // Else, it's a prepare or a commit. + // Ignore votes from ourselves. + if sender == v.SelfID { + return + } + + if prp := m.GetPrepare(); prp != nil { + if msgForNextProposal { + v.nextPrepares.registerVote(sender, m) + } else { + v.prepares.registerVote(sender, m) + } + return + } + + if cmt := m.GetCommit(); cmt != nil { + if msgForNextProposal { + v.nextCommits.registerVote(sender, m) + } else { + v.commits.registerVote(sender, m) + } + return + } +} + +func (v *View) run() { + defer v.viewEnded.Done() + defer func() { + v.ViewSequences.Store(ViewSequence{ + ProposalSeq: v.ProposalSequence, + ViewActive: false, + }) + }() + for { + select { + case <-v.abortChan: + return + case msg := <-v.incMsgs: + v.processMsg(msg.sender, msg.Message) + default: + v.doPhase() + } + } +} + +func (v *View) doPhase() { + switch v.Phase { + case PROPOSED: + v.Comm.BroadcastConsensus(v.lastBroadcastSent) // broadcast here serves also recovery + v.Phase = v.processPrepares() + case PREPARED: + v.Comm.BroadcastConsensus(v.lastBroadcastSent) + v.Phase = v.prepared() + case COMMITTED: + v.Phase = v.processProposal() + case ABORT: 
+ return + default: + v.Logger.Panicf("Unknown phase in view : %v", v) + } +} + +func (v *View) processPrePrepare(pp *protos.PrePrepare, m *protos.Message, msgForNextProposal bool, sender uint64) { + if pp.Proposal == nil { + v.Logger.Warnf("%d got pre-prepare from %d with empty proposal", v.SelfID, sender) + return + } + if sender != v.LeaderID { + v.Logger.Warnf("%d got pre-prepare from %d but the leader is %d", v.SelfID, sender, v.LeaderID) + return + } + + prePrepareChan := v.prePrepare + currentOrNext := "current" + + if msgForNextProposal { + prePrepareChan = v.nextPrePrepare + currentOrNext = "next" + } + + select { + case prePrepareChan <- m: + default: + v.Logger.Warnf("Got a pre-prepare for %s sequence without processing previous one, dropping message", currentOrNext) + } +} + +func (v *View) prepared() Phase { + proposal := v.inFlightProposal + signatures, phase := v.processCommits(proposal) + if phase == ABORT { + return ABORT + } + + seq := v.ProposalSequence + + v.Logger.Infof("%d processed commits for proposal with seq %d", v.SelfID, seq) + + v.decide(proposal, signatures, v.inFlightRequests) + return COMMITTED +} + +func (v *View) processProposal() Phase { + v.prevPrepareSent = v.currPrepareSent + v.prevCommitSent = v.currCommitSent + v.currPrepareSent = nil + v.currCommitSent = nil + v.inFlightProposal = nil + v.inFlightRequests = nil + v.lastBroadcastSent = nil + + var proposal types.Proposal + var receivedProposal *protos.Message + var prevCommits []*protos.Signature + + var gotPrePrepare bool + for !gotPrePrepare { + select { + case <-v.abortChan: + return ABORT + case msg := <-v.incMsgs: + v.processMsg(msg.sender, msg.Message) + case msg := <-v.prePrepare: + gotPrePrepare = true + receivedProposal = msg + prePrepare := msg.GetPrePrepare() + prop := prePrepare.Proposal + prevCommits = prePrepare.PrevCommitSignatures + proposal = types.Proposal{ + VerificationSequence: int64(prop.VerificationSequence), + Metadata: prop.Metadata, + Payload: 
prop.Payload, + Header: prop.Header, + } + } + } + + requests, err := v.verifyProposal(proposal, prevCommits) + if err != nil { + v.Logger.Warnf("%d received bad proposal from %d: %v", v.SelfID, v.LeaderID, err) + v.FailureDetector.Complain(v.Number, false) + v.Sync.Sync() + v.stop() + return ABORT + } + + seq := v.ProposalSequence + + prepareMessage := v.createPrepare(seq, proposal) + + // We are about to send a prepare for a pre-prepare, + // so we record the pre-prepare. + savedMsg := &protos.SavedMessage{ + Content: &protos.SavedMessage_ProposedRecord{ + ProposedRecord: &protos.ProposedRecord{ + PrePrepare: receivedProposal.GetPrePrepare(), + Prepare: prepareMessage.GetPrepare(), + }, + }, + } + if err := v.State.Save(savedMsg); err != nil { + v.Logger.Panicf("Failed to save message to state, error: %v", err) + } + v.lastBroadcastSent = prepareMessage + v.currPrepareSent = proto.Clone(prepareMessage).(*protos.Message) + v.currPrepareSent.GetPrepare().Assist = true + v.inFlightProposal = &proposal + v.inFlightRequests = requests + + if v.SelfID == v.LeaderID { + v.Comm.BroadcastConsensus(receivedProposal) + } + + v.Logger.Infof("Processed proposal with seq %d", seq) + return PROPOSED +} + +func (v *View) createPrepare(seq uint64, proposal types.Proposal) *protos.Message { + return &protos.Message{ + Content: &protos.Message_Prepare{ + Prepare: &protos.Prepare{ + Seq: seq, + View: v.Number, + Digest: proposal.Digest(), + }, + }, + } +} + +func (v *View) processPrepares() Phase { + proposal := v.inFlightProposal + expectedDigest := proposal.Digest() + + var voterIDs []uint64 + for len(voterIDs) < v.Quorum-1 { + select { + case <-v.abortChan: + return ABORT + case msg := <-v.incMsgs: + v.processMsg(msg.sender, msg.Message) + case vote := <-v.prepares.votes: + prepare := vote.GetPrepare() + if prepare.Digest != expectedDigest { + seq := v.ProposalSequence + v.Logger.Warnf("Got wrong digest at processPrepares for prepare with seq %d, expecting %v but got %v, we are 
in seq %d", prepare.Seq, expectedDigest, prepare.Digest, seq) + continue + } + voterIDs = append(voterIDs, vote.sender) + } + } + + v.Logger.Infof("%d collected %d prepares from %v", v.SelfID, len(voterIDs), voterIDs) + + // SignProposal returns a types.Signature with the following 3 fields: + // ID: The integer that represents this node. + // Value: The signature, encoded according to the specific signature specification. + // Msg: A succinct representation of the proposal that binds this proposal unequivocally. + + // The block proof consists of the aggregation of all these signatures from 2f+1 commits of different nodes. + + prpFrom := &protos.PreparesFrom{ + Ids: voterIDs, + } + + prpFromRaw, err := proto.Marshal(prpFrom) + if err != nil { + v.Logger.Panicf("Failed marshaling prepares from: %v", err) + } + + v.myProposalSig = v.Signer.SignProposal(*proposal, prpFromRaw) + + seq := v.ProposalSequence + + commitMsg := &protos.Message{ + Content: &protos.Message_Commit{ + Commit: &protos.Commit{ + View: v.Number, + Digest: expectedDigest, + Seq: seq, + Signature: &protos.Signature{ + Signer: v.myProposalSig.ID, + Value: v.myProposalSig.Value, + Msg: v.myProposalSig.Msg, + }, + }, + }, + } + + preparedProof := &protos.SavedMessage{ + Content: &protos.SavedMessage_Commit{ + Commit: commitMsg, + }, + } + + // We received enough prepares to send a commit. + // Save the commit message we are about to send. 
+ if err := v.State.Save(preparedProof); err != nil { + v.Logger.Panicf("Failed to save message to state, error: %v", err) + } + v.currCommitSent = proto.Clone(commitMsg).(*protos.Message) + v.currCommitSent.GetCommit().Assist = true + v.lastBroadcastSent = commitMsg + + v.Logger.Infof("Processed prepares for proposal with seq %d", seq) + return PREPARED +} + +func (v *View) processCommits(proposal *types.Proposal) ([]types.Signature, Phase) { + var signatures []types.Signature + + signatureCollector := &voteVerifier{ + validVotes: make(chan types.Signature, cap(v.commits.votes)), + expectedDigest: proposal.Digest(), + proposal: proposal, + v: v, + } + + var voterIDs []uint64 + + for len(signatures) < v.Quorum-1 { + select { + case <-v.abortChan: + return nil, ABORT + case msg := <-v.incMsgs: + v.processMsg(msg.sender, msg.Message) + case vote := <-v.commits.votes: + // Valid votes end up written into the 'validVotes' channel. + go func(vote *protos.Message) { + signatureCollector.verifyVote(vote) + }(vote.Message) + case signature := <-signatureCollector.validVotes: + signatures = append(signatures, signature) + voterIDs = append(voterIDs, signature.ID) + } + } + + v.Logger.Infof("%d collected %d commits from %v", v.SelfID, len(signatures), voterIDs) + + return signatures, COMMITTED +} + +func (v *View) verifyProposal(proposal types.Proposal, prevCommits []*protos.Signature) ([]types.RequestInfo, error) { + // Verify proposal has correct structure and contains authorized requests. + requests, err := v.Verifier.VerifyProposal(proposal) + if err != nil { + v.Logger.Warnf("Received bad proposal: %v", err) + return nil, err + } + + // Verify proposal's metadata is valid. 
+ md := &protos.ViewMetadata{} + if err := proto.Unmarshal(proposal.Metadata, md); err != nil { + return nil, err + } + + if md.ViewId != v.Number { + v.Logger.Warnf("Expected view number %d but got %d", v.Number, md.ViewId) + return nil, errors.New("invalid view number") + } + + if md.LatestSequence != v.ProposalSequence { + v.Logger.Warnf("Expected proposal sequence %d but got %d", v.ProposalSequence, md.LatestSequence) + return nil, errors.New("invalid proposal sequence") + } + + if md.DecisionsInView != v.DecisionsInView { + v.Logger.Warnf("Expected decisions in view %d but got %d", v.DecisionsInView, md.DecisionsInView) + return nil, errors.New("invalid decisions in view") + } + + expectedSeq := v.Verifier.VerificationSequence() + if uint64(proposal.VerificationSequence) != expectedSeq { + v.Logger.Warnf("Expected verification sequence %d but got %d", expectedSeq, proposal.VerificationSequence) + return nil, errors.New("verification sequence mismatch") + } + + prepareAcknowledgements, err := v.verifyPrevCommitSignatures(prevCommits, expectedSeq) + if err != nil { + return nil, err + } + + if err := v.verifyBlacklist(prevCommits, expectedSeq, md.BlackList, prepareAcknowledgements); err != nil { + return nil, err + } + + // Check that the metadata contains a digest of the previous commit signatures + prevCommitDigest := CommitSignaturesDigest(prevCommits) + if !bytes.Equal(prevCommitDigest, md.PrevCommitSignatureDigest) && v.DecisionsPerLeader > 0 { + return nil, errors.Errorf("prev commit signatures received from leader mismatches the metadata digest") + } + + return requests, nil +} + +func (v *View) verifyPrevCommitSignatures(prevCommitSignatures []*protos.Signature, currVerificationSeq uint64) (map[uint64]*protos.PreparesFrom, error) { + prevPropRaw, _ := v.RetrieveCheckpoint() + prevProposalMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(prevPropRaw.Metadata, prevProposalMetadata); err != nil { + v.Logger.Panicf("Couldn't unmarshal the 
previous persisted proposal metadata: %v", err) + } + + v.Logger.Debugf("Previous proposal verification sequence: %d, current verification sequence: %d", prevPropRaw.VerificationSequence, currVerificationSeq) + if prevPropRaw.VerificationSequence != currVerificationSeq { + v.Logger.Infof("Skipping verifying prev commit signatures due to verification sequence advancing from %d to %d", + prevPropRaw.VerificationSequence, currVerificationSeq) + return nil, nil + } + + prepareAcknowledgements := make(map[uint64]*protos.PreparesFrom) + + prevProp := types.Proposal{ + VerificationSequence: int64(prevPropRaw.VerificationSequence), + Metadata: prevPropRaw.Metadata, + Payload: prevPropRaw.Payload, + Header: prevPropRaw.Header, + } + + // All previous commit signatures should be verifiable + for _, sig := range prevCommitSignatures { + aux, err := v.Verifier.VerifyConsenterSig(types.Signature{ + ID: sig.Signer, + Msg: sig.Msg, + Value: sig.Value, + }, prevProp) + if err != nil { + return nil, errors.Errorf("failed verifying consenter signature of %d: %v", sig.Signer, err) + } + prpf := &protos.PreparesFrom{} + if err := proto.Unmarshal(aux, prpf); err != nil { + return nil, errors.Errorf("failed unmarshaling auxiliary input from %d: %v", sig.Signer, err) + } + prepareAcknowledgements[sig.Signer] = prpf + } + + return prepareAcknowledgements, nil +} + +func (v *View) verifyBlacklist(prevCommitSignatures []*protos.Signature, currVerificationSeq uint64, pendingBlacklist []uint64, prepareAcknowledgements map[uint64]*protos.PreparesFrom) error { + if v.DecisionsPerLeader == 0 { + v.Logger.Debugf("DecisionsPerLeader is 0, hence leader rotation is inactive") + if len(pendingBlacklist) > 0 { + v.Logger.Warnf("Blacklist cannot be non-empty (%v) if rotation is inactive", pendingBlacklist) + return errors.Errorf("rotation is inactive but blacklist is not empty: %v", pendingBlacklist) + } + return nil + } + + prevPropRaw, myLastCommitSignatures := v.RetrieveCheckpoint() + 
prevProposalMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(prevPropRaw.Metadata, prevProposalMetadata); err != nil { + v.Logger.Panicf("Couldn't unmarshal the previous persisted proposal metadata: %v", err) + } + + v.Logger.Debugf("Previous proposal verification sequence: %d, current verification sequence: %d", prevPropRaw.VerificationSequence, currVerificationSeq) + if prevPropRaw.VerificationSequence != currVerificationSeq { + // If there has been a reconfiguration, black list should remain the same + if !equalIntLists(prevProposalMetadata.BlackList, pendingBlacklist) { + return errors.Errorf("blacklist changed (%v --> %v) during reconfiguration", prevProposalMetadata.BlackList, pendingBlacklist) + } + v.Logger.Infof("Skipping verifying prev commits due to verification sequence advancing from %d to %d", + prevPropRaw.VerificationSequence, currVerificationSeq) + return nil + } + + if v.MembershipNotifier.MembershipChange() { + // If there has been a membership change, black list should remain the same + if !equalIntLists(prevProposalMetadata.BlackList, pendingBlacklist) { + return errors.Errorf("blacklist changed (%v --> %v) during membership change", prevProposalMetadata.BlackList, pendingBlacklist) + } + v.Logger.Infof("Skipping verifying prev commits due to membership change") + return nil + } + + _, f := computeQuorum(v.N) + + if v.blacklistingSupported(f, myLastCommitSignatures) && len(prevCommitSignatures) < len(myLastCommitSignatures) { + return errors.Errorf("only %d out of %d required previous commits is included in pre-prepare", + len(prevCommitSignatures), len(myLastCommitSignatures)) + } + + // We previously verified the previous commit signatures, now we need to ensure that the blacklist + // of this proposal is obtained by applying the deterministic blacklist maintenance algorithm + // on the blacklist of the previous proposal which has been committed. 
+ + blacklist := &blacklist{ + currentLeader: v.LeaderID, + leaderRotation: v.DecisionsPerLeader > 0, + n: v.N, + prevMD: prevProposalMetadata, + decisionsPerLeader: v.DecisionsPerLeader, + preparesFrom: prepareAcknowledgements, + f: f, + logger: v.Logger, + nodes: v.Comm.Nodes(), + currView: v.Number, + } + + expectedBlacklist := blacklist.computeUpdate() + if !equalIntLists(pendingBlacklist, expectedBlacklist) { + return errors.Errorf("proposed blacklist %v differs from expected %v blacklist", pendingBlacklist, expectedBlacklist) + } + + return nil +} + +func (v *View) handlePrevSeqMessage(msgProposalSeq, sender uint64, m *protos.Message) { + if m.GetPrePrepare() != nil { + v.Logger.Warnf("Got pre-prepare for sequence %d but we're in sequence %d", msgProposalSeq, v.ProposalSequence) + return + } + msgType := "prepare" + if m.GetCommit() != nil { + msgType = "commit" + } + + var found bool + + switch msgType { + case "prepare": + // This is an assist message, we don't need to reply to it. + if m.GetPrepare().Assist { + return + } + if v.prevPrepareSent != nil { + v.Comm.SendConsensus(sender, v.prevPrepareSent) + found = true + } + case "commit": + // This is an assist message, we don't need to reply to it. + if m.GetCommit().Assist { + return + } + if v.prevCommitSent != nil { + v.Comm.SendConsensus(sender, v.prevCommitSent) + found = true + } + } + + prevMsgFound := fmt.Sprintf("but didn't have a previous %s to send back.", msgType) + if found { + prevMsgFound = fmt.Sprintf("sent back previous %s.", msgType) + } + v.Logger.Debugf("Got %s for previous sequence (%d) from %d, %s", msgType, msgProposalSeq, sender, prevMsgFound) +} + +func (v *View) discoverIfSyncNeeded(sender uint64, m *protos.Message) { + // We're only interested in commit messages. + commit := m.GetCommit() + if commit == nil { + return + } + + // To commit a block we need 2f + 1 votes. + // at least f+1 of them are honest and will broadcast + // their commits to votes to everyone including us. 
+ // In each such threshold of f+1 votes there is at least + a single honest node that prepared for a proposal + which we apparently missed. + _, f := computeQuorum(v.N) + threshold := f + 1 + + v.lastVotedProposalByID[sender] = *commit + + v.Logger.Debugf("Got commit of seq %d in view %d from %d while being in seq %d in view %d", + commit.Seq, commit.View, sender, v.ProposalSequence, v.Number) + + // If we haven't reached a threshold of proposals yet, abort. + if len(v.lastVotedProposalByID) < threshold { + return + } + + // Make a histogram out of all current seen votes. + countsByVotes := make(map[proposalInfo]int) + for _, vote := range v.lastVotedProposalByID { + info := proposalInfo{ + digest: vote.Digest, + view: vote.View, + seq: vote.Seq, + } + countsByVotes[info]++ + } + + // Check if there is a proposal that collected a threshold of votes, + // and that sequence is higher than our current sequence, or our view is different. + for vote, count := range countsByVotes { + if count < threshold { + continue + } + + // Disregard votes for past views. + if vote.view < v.Number { + continue + } + + // Disregard votes for past sequences for this view. 
+ if vote.seq <= v.ProposalSequence && vote.view == v.Number { + continue + } + + v.Logger.Warnf("Seen %d votes for digest %s in view %d, sequence %d but I am in view %d and seq %d", + count, vote.digest, vote.view, vote.seq, v.Number, v.ProposalSequence) + v.stop() + v.Sync.Sync() + return + } +} + +type voteVerifier struct { + v *View + proposal *types.Proposal + expectedDigest string + validVotes chan types.Signature +} + +func (vv *voteVerifier) verifyVote(vote *protos.Message) { + commit := vote.GetCommit() + if commit.Digest != vv.expectedDigest { + vv.v.Logger.Warnf("Got wrong digest at processCommits for seq %d", commit.Seq) + return + } + + _, err := vv.v.Verifier.VerifyConsenterSig(types.Signature{ + ID: commit.Signature.Signer, + Value: commit.Signature.Value, + Msg: commit.Signature.Msg, + }, *vv.proposal) + if err != nil { + vv.v.Logger.Warnf("Couldn't verify %d's signature: %v", commit.Signature.Signer, err) + return + } + + vv.validVotes <- types.Signature{ + ID: commit.Signature.Signer, + Value: commit.Signature.Value, + Msg: commit.Signature.Msg, + } +} + +func (v *View) decide(proposal *types.Proposal, signatures []types.Signature, requests []types.RequestInfo) { + v.Logger.Infof("Deciding on seq %d", v.ProposalSequence) + v.ViewSequences.Store(ViewSequence{ProposalSeq: v.ProposalSequence, ViewActive: true}) + // first make preparations for the next sequence so that the view will be ready to continue right after delivery + v.startNextSeq() + signatures = append(signatures, *v.myProposalSig) + v.Decider.Decide(*proposal, signatures, requests) +} + +func (v *View) startNextSeq() { + prevSeq := v.ProposalSequence + + v.ProposalSequence++ + v.DecisionsInView++ + + nextSeq := v.ProposalSequence + + v.Logger.Infof("Sequence: %d-->%d", prevSeq, nextSeq) + + // swap next prePrepare + tmp := v.prePrepare + v.prePrepare = v.nextPrePrepare + // clear tmp + for len(tmp) > 0 { + <-tmp + } + tmp = make(chan *protos.Message, 1) + v.nextPrePrepare = tmp + + // 
swap next prepares + tmpVotes := v.prepares + v.prepares = v.nextPrepares + tmpVotes.clear(v.N) + v.nextPrepares = tmpVotes + + // swap next commits + tmpVotes = v.commits + v.commits = v.nextCommits + tmpVotes.clear(v.N) + v.nextCommits = tmpVotes +} + +// GetMetadata returns the current sequence and view number (in a marshaled ViewMetadata protobuf message) +func (v *View) GetMetadata() []byte { + metadata := &protos.ViewMetadata{ + ViewId: v.Number, + LatestSequence: v.ProposalSequence, + DecisionsInView: v.DecisionsInView, + } + + var prevSigs []*protos.Signature + var prevProp protos.Proposal + verificationSeq := v.Verifier.VerificationSequence() + + prevProp, prevSigs = v.RetrieveCheckpoint() + + prevMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(prevProp.Metadata, prevMD); err != nil { + v.Logger.Panicf("Attempted to propose a proposal with invalid unchanged previous proposal view metadata: %v", err) + } + + metadata.BlackList = prevMD.BlackList + + metadata = v.metadataWithUpdatedBlacklist(metadata, verificationSeq, prevProp, prevSigs) + metadata = v.bindCommitSignaturesToProposalMetadata(metadata, prevSigs) + + return MarshalOrPanic(metadata) +} + +func (v *View) metadataWithUpdatedBlacklist(metadata *protos.ViewMetadata, verificationSeq uint64, prevProp protos.Proposal, prevSigs []*protos.Signature) *protos.ViewMetadata { + membershipChange := v.MembershipNotifier.MembershipChange() + if verificationSeq == prevProp.VerificationSequence && !membershipChange { + v.Logger.Debugf("Proposing proposal %d with verification sequence of %d and %d commit signatures", + v.ProposalSequence, verificationSeq, len(prevSigs)) + return v.updateBlacklistMetadata(metadata, prevSigs, prevProp.Metadata) + } + + if verificationSeq != prevProp.VerificationSequence { + v.Logger.Infof("Skipping updating blacklist due to verification sequence changing from %d to %d", + prevProp.VerificationSequence, verificationSeq) + } + if membershipChange { + v.Logger.Infof("Skipping 
updating blacklist due to membership change") + } + + return metadata +} + +// Propose broadcasts a prePrepare message with the given proposal +func (v *View) Propose(proposal types.Proposal) { + _, prevSigs := v.RetrieveCheckpoint() + + seq := v.ProposalSequence + msg := &protos.Message{ + Content: &protos.Message_PrePrepare{ + PrePrepare: &protos.PrePrepare{ + View: v.Number, + Seq: seq, + Proposal: &protos.Proposal{ + Header: proposal.Header, + Payload: proposal.Payload, + Metadata: proposal.Metadata, + VerificationSequence: uint64(proposal.VerificationSequence), + }, + PrevCommitSignatures: prevSigs, + }, + }, + } + // Send the proposal to yourself in order to pre-prepare yourself and record + // it in the WAL before sending it to other nodes. + v.HandleMessage(v.LeaderID, msg) + v.Logger.Debugf("Proposing proposal sequence %d in view %d", seq, v.Number) +} + +func (v *View) bindCommitSignaturesToProposalMetadata(metadata *protos.ViewMetadata, prevSigs []*protos.Signature) *protos.ViewMetadata { + if v.DecisionsPerLeader == 0 { + v.Logger.Debugf("Leader rotation is disabled, will not bind signatures to proposals") + return metadata + } + metadata.PrevCommitSignatureDigest = CommitSignaturesDigest(prevSigs) + + if len(metadata.PrevCommitSignatureDigest) == 0 { + v.Logger.Debugf("No previous commit signatures detected") + } else { + v.Logger.Debugf("Bound %d commit signatures to proposal", len(prevSigs)) + } + return metadata +} + +func (v *View) stop() { + v.stopOnce.Do(func() { + if v.abortChan == nil { + return + } + close(v.abortChan) + }) +} + +// Abort forces the view to end +func (v *View) Abort() { + v.stop() + v.viewEnded.Wait() +} + +func (v *View) stopped() bool { + select { + case <-v.abortChan: + return true + default: + return false + } +} + +func (v *View) updateBlacklistMetadata(metadata *protos.ViewMetadata, prevSigs []*protos.Signature, prevMetadata []byte) *protos.ViewMetadata { + if v.DecisionsPerLeader == 0 { + v.Logger.Debugf("Rotation is 
disabled, setting blacklist to be empty") + metadata.BlackList = nil + return metadata + } + + preparesFrom := make(map[uint64]*protos.PreparesFrom) + + for _, sig := range prevSigs { + aux := v.Verifier.AuxiliaryData(sig.Msg) + prpf := &protos.PreparesFrom{} + if err := proto.Unmarshal(aux, prpf); err != nil { + v.Logger.Panicf("Failed unmarshalling auxiliary data from previously persisted signatures: %v", err) + } + preparesFrom[sig.Signer] = prpf + } + + prevMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(prevMetadata, prevMD); err != nil { + v.Logger.Panicf("Attempted to propose a proposal with invalid previous proposal view metadata: %v", err) + } + + _, f := computeQuorum(v.N) + + blacklist := &blacklist{ + currentLeader: v.LeaderID, + leaderRotation: v.DecisionsPerLeader > 0, + currView: metadata.ViewId, + prevMD: prevMD, + nodes: v.Comm.Nodes(), + f: f, + n: v.N, + logger: v.Logger, + preparesFrom: preparesFrom, + decisionsPerLeader: v.DecisionsPerLeader, + } + metadata.BlackList = blacklist.computeUpdate() + return metadata +} + +func (v *View) blacklistingSupported(f int, myLastCommitSignatures []*protos.Signature) bool { + // Once we blacklist, there is no way back. This is a one way trip, unless we downgrade the version + // in all nodes and view change. + if v.blacklistSupported { + return true + } + // We wish to find whether there are f+1 witnesses for blacklisting being + // activated among the signed commits of the previous proposal. + var count int + for _, commitSig := range myLastCommitSignatures { + aux := v.Verifier.AuxiliaryData(commitSig.Msg) + if len(aux) > 0 { + count++ + } + } + + v.Logger.Debugf("Found %d out of %d required witnesses for auxiliary data", count, f+1) + + blacklistSupported := count > f + + // We cache the result in case it is 'true'. + // Subsequent invocations will skip the parsing. 
+ v.blacklistSupported = v.blacklistSupported || blacklistSupported + return blacklistSupported +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/internal/bft/viewchanger.go b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/viewchanger.go new file mode 100644 index 00000000000..164c29d2104 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/internal/bft/viewchanger.go @@ -0,0 +1,1271 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package bft + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" +) + +// ViewController controls the view +//go:generate mockery -dir . -name ViewController -case underscore -output ./mocks/ +type ViewController interface { + ViewChanged(newViewNumber uint64, newProposalSequence uint64) + AbortView(view uint64) +} + +// Pruner prunes revoked requests +//go:generate mockery -dir . -name Pruner -case underscore -output ./mocks/ +type Pruner interface { + MaybePruneRevokedRequests() +} + +// RequestsTimer controls requests +//go:generate mockery -dir . 
-name RequestsTimer -case underscore -output ./mocks/ +type RequestsTimer interface { + StopTimers() + RestartTimers() + RemoveRequest(request types.RequestInfo) error +} + +type change struct { + view uint64 + stopView bool +} + +// ViewChanger is responsible for running the view change protocol +type ViewChanger struct { + // Configuration + SelfID uint64 + NodesList []uint64 + N uint64 + f int + quorum int + SpeedUpViewChange bool + LeaderRotation bool + DecisionsPerLeader uint64 + + Logger api.Logger + Comm Comm + Signer api.Signer + Verifier api.Verifier + Application api.Application + Synchronizer Synchronizer + + Checkpoint *types.Checkpoint + InFlight *InFlightData + State State + + Controller ViewController + RequestsTimer RequestsTimer + Pruner Pruner + + // for the in flight proposal view + ViewSequences *atomic.Value + inFlightDecideChan chan struct{} + inFlightSyncChan chan struct{} + inFlightView *View + inFlightViewLock sync.RWMutex + + Ticker <-chan time.Time + lastTick time.Time + ResendTimeout time.Duration + lastResend time.Time + ViewChangeTimeout time.Duration + startViewChangeTime time.Time + checkTimeout bool + backOffFactor uint64 + + // Runtime + Restore chan struct{} + InMsqQSize int + incMsgs chan *incMsg + viewChangeMsgs *voteSet + viewDataMsgs *voteSet + currView uint64 + nextView uint64 + startChangeChan chan *change + informChan chan uint64 + + stopOnce sync.Once + stopChan chan struct{} + vcDone sync.WaitGroup + + ControllerStartedWG sync.WaitGroup +} + +// Start the view changer +func (v *ViewChanger) Start(startViewNumber uint64) { + v.incMsgs = make(chan *incMsg, v.InMsqQSize) + v.startChangeChan = make(chan *change, 1) + v.informChan = make(chan uint64, 1) + + v.quorum, v.f = computeQuorum(v.N) + + v.stopChan = make(chan struct{}) + v.stopOnce = sync.Once{} + v.vcDone.Add(1) + + v.setupVotes() + + // set without locking + v.currView = startViewNumber + v.nextView = v.currView + + v.lastTick = time.Now() + v.lastResend = 
v.lastTick + + v.backOffFactor = 1 + + v.inFlightDecideChan = make(chan struct{}) + v.inFlightSyncChan = make(chan struct{}) + + go func() { + defer v.vcDone.Done() + v.ControllerStartedWG.Wait() + v.run() + }() + +} + +func (v *ViewChanger) setupVotes() { + // view change + acceptViewChange := func(_ uint64, message *protos.Message) bool { + return message.GetViewChange() != nil + } + v.viewChangeMsgs = &voteSet{ + validVote: acceptViewChange, + } + v.viewChangeMsgs.clear(v.N) + + // view data + acceptViewData := func(_ uint64, message *protos.Message) bool { + return message.GetViewData() != nil + } + v.viewDataMsgs = &voteSet{ + validVote: acceptViewData, + } + v.viewDataMsgs.clear(v.N) +} + +func (v *ViewChanger) close() { + v.stopOnce.Do( + func() { + select { + case <-v.stopChan: + return + default: + close(v.stopChan) + } + }, + ) +} + +// Stop the view changer +func (v *ViewChanger) Stop() { + v.close() + v.vcDone.Wait() +} + +// HandleMessage passes a message to the view changer +func (v *ViewChanger) HandleMessage(sender uint64, m *protos.Message) { + msg := &incMsg{sender: sender, Message: m} + select { + case <-v.stopChan: + return + case v.incMsgs <- msg: + } +} + +func (v *ViewChanger) run() { + for { + select { + case <-v.stopChan: + return + case change := <-v.startChangeChan: + v.startViewChange(change) + case msg := <-v.incMsgs: + v.processMsg(msg.sender, msg.Message) + case now := <-v.Ticker: + v.lastTick = now + v.checkIfResendViewChange(now) + v.checkIfTimeout(now) + case info := <-v.informChan: + v.informNewView(info) + case <-v.Restore: + v.processViewChangeMsg(true) + } + } +} + +func (v *ViewChanger) getLeader() uint64 { + return getLeaderID(v.currView, v.N, v.NodesList, v.LeaderRotation, 0, v.DecisionsPerLeader, v.blacklist()) +} + +func (v *ViewChanger) checkIfResendViewChange(now time.Time) { + nextTimeout := v.lastResend.Add(v.ResendTimeout) + if nextTimeout.After(now) { // check if it is time to resend + return + } + if v.checkTimeout 
{ // during view change process + msg := &protos.Message{ + Content: &protos.Message_ViewChange{ + ViewChange: &protos.ViewChange{ + NextView: v.nextView, + }, + }, + } + v.Comm.BroadcastConsensus(msg) + v.Logger.Debugf("Node %d resent a view change message with next view %d", v.SelfID, v.nextView) + v.lastResend = now // update last resend time, or at least last time we checked if we should resend + } +} + +func (v *ViewChanger) checkIfTimeout(now time.Time) { + if !v.checkTimeout { + return + } + nextTimeout := v.startViewChangeTime.Add(v.ViewChangeTimeout * time.Duration(v.backOffFactor)) + if nextTimeout.After(now) { // check if timeout has passed + return + } + v.Logger.Debugf("Node %d got a view change timeout, the current view is %d", v.SelfID, v.currView) + v.checkTimeout = false // stop timeout for now, a new one will start when a new view change begins + v.backOffFactor++ // next timeout will be longer + // the timeout has passed, something went wrong, try sync and complain + v.Logger.Debugf("Node %d is calling sync because it got a view change timeout", v.SelfID) + v.Synchronizer.Sync() + v.StartViewChange(v.currView, false) // don't stop the view, the sync maybe created a good view +} + +func (v *ViewChanger) processMsg(sender uint64, m *protos.Message) { + // viewChange message + if vc := m.GetViewChange(); vc != nil { + v.Logger.Debugf("Node %d is processing a view change message %v from %d with next view %d", v.SelfID, m, sender, vc.NextView) + // check view number + if vc.NextView != v.currView+1 { // accept view change only to immediate next view number + v.Logger.Warnf("Node %d got viewChange message %v from %d with view %d, expected view %d", v.SelfID, m, sender, vc.NextView, v.currView+1) + return + } + v.viewChangeMsgs.registerVote(sender, m) + v.processViewChangeMsg(false) + return + } + + //viewData message + if vd := m.GetViewData(); vd != nil { + v.Logger.Debugf("Node %d is processing a view data message %s from %d", v.SelfID, 
MsgToString(m), sender) + if !v.validateViewDataMsg(vd, sender) { + return + } + v.viewDataMsgs.registerVote(sender, m) + v.processViewDataMsg() + return + } + + // newView message + if nv := m.GetNewView(); nv != nil { + v.Logger.Debugf("Node %d is processing a new view message %s from %d", v.SelfID, MsgToString(m), sender) + leader := v.getLeader() + if sender != leader { + v.Logger.Warnf("Node %d got newView message %v from %d, expected sender to be %d the next leader", v.SelfID, MsgToString(m), sender, leader) + return + } + v.processNewViewMsg(nv) + } +} + +// InformNewView tells the view changer to advance to a new view number +func (v *ViewChanger) InformNewView(view uint64) { + select { + case v.informChan <- view: + case <-v.stopChan: + return + } +} + +func (v *ViewChanger) informNewView(view uint64) { + if view < v.currView { + v.Logger.Debugf("Node %d was informed of view %d, but the current view is %d", v.SelfID, view, v.currView) + return + } + v.Logger.Debugf("Node %d was informed of a new view %d", v.SelfID, view) + v.currView = view + v.nextView = v.currView + v.viewChangeMsgs.clear(v.N) + v.viewDataMsgs.clear(v.N) + v.checkTimeout = false + v.backOffFactor = 1 //reset + v.RequestsTimer.RestartTimers() +} + +// StartViewChange initiates a view change +func (v *ViewChanger) StartViewChange(view uint64, stopView bool) { + select { + case v.startChangeChan <- &change{view: view, stopView: stopView}: + default: + } +} + +// StartViewChange stops current view and timeouts, and broadcasts a view change message to all +func (v *ViewChanger) startViewChange(change *change) { + if change.view < v.currView { // this is about an old view + v.Logger.Debugf("Node %d has a view change request with an old view %d, while the current view is %d", v.SelfID, change.view, v.currView) + return + } + if v.nextView == v.currView+1 { + v.Logger.Debugf("Node %d has already started view change with last view %d", v.SelfID, v.currView) + v.checkTimeout = true // make sure 
timeout is checked anyway + return + } + v.nextView = v.currView + 1 + v.RequestsTimer.StopTimers() + msg := &protos.Message{ + Content: &protos.Message_ViewChange{ + ViewChange: &protos.ViewChange{ + NextView: v.nextView, + }, + }, + } + v.Comm.BroadcastConsensus(msg) + v.Logger.Debugf("Node %d started view change, last view is %d", v.SelfID, v.currView) + if change.stopView { + v.Controller.AbortView(v.currView) // abort the current view when joining view change + } + v.startViewChangeTime = v.lastTick + v.checkTimeout = true +} + +func (v *ViewChanger) processViewChangeMsg(restore bool) { + if ((uint64(len(v.viewChangeMsgs.voted)) == uint64(v.f+1)) && v.SpeedUpViewChange) || restore { // join view change + v.Logger.Debugf("Node %d is joining view change, last view is %d", v.SelfID, v.currView) + v.startViewChange(&change{v.currView, true}) + } + if (len(v.viewChangeMsgs.voted) >= v.quorum-1) || restore { // send view data + if !v.SpeedUpViewChange { + v.Logger.Debugf("Node %d is joining view change, last view is %d", v.SelfID, v.currView) + v.startViewChange(&change{v.currView, true}) + } + if !restore { + msgToSave := &protos.SavedMessage{ + Content: &protos.SavedMessage_ViewChange{ + ViewChange: &protos.ViewChange{ + NextView: v.currView, + }, + }, + } + if err := v.State.Save(msgToSave); err != nil { + v.Logger.Panicf("Failed to save message to state, error: %v", err) + } + } + v.currView = v.nextView + v.viewChangeMsgs.clear(v.N) + v.viewDataMsgs.clear(v.N) // clear because currView changed + + msg := v.prepareViewDataMsg() + leader := v.getLeader() + if leader == v.SelfID { + v.viewDataMsgs.registerVote(v.SelfID, msg) + } else { + v.Comm.SendConsensus(leader, msg) + } + v.Logger.Debugf("Node %d sent view data msg, with next view %d, to the new leader %d", v.SelfID, v.currView, leader) + } +} + +func (v *ViewChanger) prepareViewDataMsg() *protos.Message { + lastDecision, lastDecisionSignatures := v.Checkpoint.Get() + inFlight := v.getInFlight(&lastDecision) 
+ prepared := v.InFlight.IsInFlightPrepared() + vd := &protos.ViewData{ + NextView: v.currView, + LastDecision: &lastDecision, + LastDecisionSignatures: lastDecisionSignatures, + InFlightProposal: inFlight, + InFlightPrepared: prepared, + } + vdBytes := MarshalOrPanic(vd) + sig := v.Signer.Sign(vdBytes) + msg := &protos.Message{ + Content: &protos.Message_ViewData{ + ViewData: &protos.SignedViewData{ + RawViewData: vdBytes, + Signer: v.SelfID, + Signature: sig, + }, + }, + } + return msg +} + +func (v *ViewChanger) getInFlight(lastDecision *protos.Proposal) *protos.Proposal { + inFlight := v.InFlight.InFlightProposal() + if inFlight == nil { + v.Logger.Debugf("Node %d's in flight proposal is not set", v.SelfID) + return nil + } + if inFlight.Metadata == nil { + v.Logger.Panicf("Node %d's in flight proposal metadata is not set", v.SelfID) + } + inFlightMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(inFlight.Metadata, inFlightMetadata); err != nil { + v.Logger.Panicf("Node %d is unable to unmarshal its own in flight metadata, err: %v", v.SelfID, err) + } + proposal := &protos.Proposal{ + Header: inFlight.Header, + Metadata: inFlight.Metadata, + Payload: inFlight.Payload, + VerificationSequence: uint64(inFlight.VerificationSequence), + } + if lastDecision == nil { + v.Logger.Panicf("The given last decision is nil", v.SelfID) + } + if lastDecision.Metadata == nil { + return proposal // this is the first proposal after genesis + } + lastDecisionMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(lastDecision.Metadata, lastDecisionMetadata); err != nil { + v.Logger.Panicf("Node %d is unable to unmarshal its own last decision metadata from checkpoint, err: %v", v.SelfID, err) + } + if inFlightMetadata.LatestSequence == lastDecisionMetadata.LatestSequence { + v.Logger.Debugf("Node %d's in flight proposal and the last decision has the same sequence: %d", v.SelfID, inFlightMetadata.LatestSequence) + return nil // this is not an actual in flight 
proposal + } + if inFlightMetadata.LatestSequence != lastDecisionMetadata.LatestSequence+1 { + v.Logger.Panicf("Node %d's in flight proposal sequence is %d while its last decision sequence is %d", v.SelfID, inFlightMetadata.LatestSequence, lastDecisionMetadata.LatestSequence) + } + return proposal +} + +func (v *ViewChanger) validateViewDataMsg(svd *protos.SignedViewData, sender uint64) bool { + if v.getLeader() != v.SelfID { // check if I am the next leader + v.Logger.Warnf("Node %d got %s from %d, but %d is not the next leader of view %d", v.SelfID, signedViewDataToString(svd), sender, v.SelfID, v.currView) + return false + } + + vd := &protos.ViewData{} + if err := proto.Unmarshal(svd.RawViewData, vd); err != nil { + v.Logger.Errorf("Node %d was unable to unmarshal viewData message from %d, error: %v", v.SelfID, sender, err) + return false + } + if vd.NextView != v.currView { // check that the message is aligned to this view + v.Logger.Warnf("Node %d got %s from %d, but %d is in view %d", v.SelfID, signedViewDataToString(svd), sender, v.SelfID, v.currView) + return false + } + + valid, lastDecisionSequence := v.checkLastDecision(svd, sender) + if !valid { + v.Logger.Warnf("Node %d got %v from %d, but the check of the last decision didn't pass", v.SelfID, signedViewDataToString(svd), sender) + return false + } + + v.Logger.Debugf("Node %d got %s from %d, and it passed the last decision check", v.SelfID, signedViewDataToString(svd), sender) + + if err := ValidateInFlight(vd.InFlightProposal, lastDecisionSequence); err != nil { + v.Logger.Warnf("Node %d got %v from %d, but the in flight proposal is invalid, reason: %v", v.SelfID, signedViewDataToString(svd), sender, err) + return false + } + + v.Logger.Debugf("Node %d got %s from %d, and the in flight proposal is valid", v.SelfID, signedViewDataToString(svd), sender) + + return true +} + +func (v *ViewChanger) checkLastDecision(svd *protos.SignedViewData, sender uint64) (valid bool, lastDecisionSequence uint64) { + 
vd := &protos.ViewData{} + if err := proto.Unmarshal(svd.RawViewData, vd); err != nil { + v.Logger.Errorf("Node %d was unable to unmarshal viewData message from %d, error: %v", v.SelfID, sender, err) + return false, 0 + } + + if vd.LastDecision == nil { + v.Logger.Warnf("Node %d got %s from %d, but the last decision is not set", v.SelfID, signedViewDataToString(svd), sender) + return false, 0 + } + + mySequence, myLastDecision := v.extractCurrentSequence() + + // Begin to check the last decision within the view data message. + // + // The sender might be behind, in which case the new leader might not have the right config to validate + // the decision and signatures, and so the view data message is deemed invalid. + // + // If the sender is too far ahead, the new leader might not have the appropriate config. + // We do not want the new leader to perform a sync at this point, since the sender might be malicious. + // So this message is considered invalid. If the leader is actually behind this view change will eventually timeout. + // + // If the new leader and the sender have the same last decision sequence then we check that the decisions are equal. + // However, we cannot validate the decision signatures since this last decision might have been a reconfig. + // + // Lastly, the sender is ahead by one sequence, and so the new leader validates the decision and delivers it. + // Only after delivery the message signature is verified, again since this decision might have been a reconfig. 
+ + if vd.LastDecision.Metadata == nil { // this is a genesis proposal + if mySequence > 0 { + v.Logger.Debugf("Node %d got %s from %d, but the last decision seq (0) is lower than this node's current sequence %d", v.SelfID, signedViewDataToString(svd), sender, mySequence) + return false, 0 // this node is ahead + } + return true, 0 + } + lastDecisionMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(vd.LastDecision.Metadata, lastDecisionMD); err != nil { + v.Logger.Warnf("Node %d got %s from %d, but was unable to unmarshal last decision metadata, err: %v", v.SelfID, signedViewDataToString(svd), sender, err) + return false, 0 + } + if lastDecisionMD.ViewId >= vd.NextView { + v.Logger.Warnf("Node %d got %s from %d, but the last decision view %d is greater or equal to requested next view %d", v.SelfID, signedViewDataToString(svd), sender, lastDecisionMD.ViewId, vd.NextView) + return false, 0 + } + + v.Logger.Debugf("Node %d got %s from %d, the last decision seq is %d and this node's current sequence is %d", v.SelfID, signedViewDataToString(svd), sender, lastDecisionMD.LatestSequence, mySequence) + + if lastDecisionMD.LatestSequence > mySequence+1 { // this is a decision in the future, ignoring since the node might not have the right configuration to validate + v.Logger.Debugf("Node %d got %s from %d, but the last decision seq %d is greater than this node's current sequence %d", v.SelfID, signedViewDataToString(svd), sender, lastDecisionMD.LatestSequence, mySequence) + return false, 0 + } + if lastDecisionMD.LatestSequence < mySequence { // this is a decision in the past, ignoring since the node might not have the right configuration to validate + v.Logger.Debugf("Node %d got %s from %d, but the last decision seq %d is lower than this node's current sequence %d", v.SelfID, signedViewDataToString(svd), sender, lastDecisionMD.LatestSequence, mySequence) + return false, 0 + } + + if lastDecisionMD.LatestSequence == mySequence { // just make sure that we have the 
same last decision, can't verify the signatures of this last decision since this might have been a reconfiguration + // the signature on this message can be verified + if svd.Signer != sender { + v.Logger.Warnf("Node %d got %s from %d, but signer %d is not the sender %d", v.SelfID, signedViewDataToString(svd), sender, svd.Signer, sender) + return false, 0 + } + if err := v.Verifier.VerifySignature(types.Signature{ID: svd.Signer, Value: svd.Signature, Msg: svd.RawViewData}); err != nil { + v.Logger.Warnf("Node %d got %s from %d, but signature is invalid, error: %v", v.SelfID, signedViewDataToString(svd), sender, err) + return false, 0 + } + + // compare the last decision itself + if !proto.Equal(vd.LastDecision, myLastDecision) { + v.Logger.Warnf("Node %d got %s from %d, they are at the same sequence but the last decisions are not equal", v.SelfID, signedViewDataToString(svd), sender) + return false, 0 + } + + return true, lastDecisionMD.LatestSequence + } + + if lastDecisionMD.LatestSequence != mySequence+1 { + v.Logger.Warnf("Node %d got %s from %d, the last decision sequence is not equal to this node's sequence + 1", v.SelfID, signedViewDataToString(svd), sender) + return false, 0 + } + + // This node is one sequence behind, validate the last decision and deliver + + _, err := ValidateLastDecision(vd, v.quorum, v.N, v.Verifier) + if err != nil { + v.Logger.Warnf("Node %d got %s from %d, but the last decision is invalid, reason: %v", v.SelfID, signedViewDataToString(svd), sender, err) + return false, 0 + } + + proposal := types.Proposal{ + Header: vd.LastDecision.Header, + Metadata: vd.LastDecision.Metadata, + Payload: vd.LastDecision.Payload, + VerificationSequence: int64(vd.LastDecision.VerificationSequence), + } + var signatures []types.Signature + for _, sig := range vd.LastDecisionSignatures { + signature := types.Signature{ + ID: sig.Signer, + Value: sig.Value, + Msg: sig.Msg, + } + signatures = append(signatures, signature) + } + v.deliverDecision(proposal, 
signatures) + + select { // if there was a delivery with a reconfig we need to stop here before verify signature + case <-v.stopChan: + return false, 0 + default: + } + + if svd.Signer != sender { + v.Logger.Warnf("Node %d got %s from %d, but signer %d is not the sender %d", v.SelfID, signedViewDataToString(svd), sender, svd.Signer, sender) + return false, 0 + } + if err := v.Verifier.VerifySignature(types.Signature{ID: svd.Signer, Value: svd.Signature, Msg: svd.RawViewData}); err != nil { + v.Logger.Warnf("Node %d got %s from %d, but signature is invalid, error: %v", v.SelfID, signedViewDataToString(svd), sender, err) + return false, 0 + } + + return true, lastDecisionMD.LatestSequence +} + +func (v *ViewChanger) extractCurrentSequence() (uint64, *protos.Proposal) { + myMetadata := &protos.ViewMetadata{} + myLastDesicion, _ := v.Checkpoint.Get() + if myLastDesicion.Metadata == nil { + return 0, &myLastDesicion + } + if err := proto.Unmarshal(myLastDesicion.Metadata, myMetadata); err != nil { + v.Logger.Panicf("Node %d is unable to unmarshal its own last decision metadata from checkpoint, err: %v", v.SelfID, err) + } + return myMetadata.LatestSequence, &myLastDesicion +} + +// ValidateLastDecision validates the given decision, and returns its sequence when valid +func ValidateLastDecision(vd *protos.ViewData, quorum int, N uint64, verifier api.Verifier) (lastSequence uint64, err error) { + if vd.LastDecision == nil { + return 0, errors.Errorf("the last decision is not set") + } + if vd.LastDecision.Metadata == nil { + // This is a genesis proposal, there are no signatures to validate, so we return at this point + return 0, nil + } + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(vd.LastDecision.Metadata, md); err != nil { + return 0, errors.Errorf("unable to unmarshal last decision metadata, err: %v", err) + } + if md.ViewId >= vd.NextView { + return 0, errors.Errorf("last decision view %d is greater or equal to requested next view %d", md.ViewId, 
vd.NextView) + } + numSigs := len(vd.LastDecisionSignatures) + if numSigs < quorum { + return 0, errors.Errorf("there are only %d last decision signatures", numSigs) + } + nodesMap := make(map[uint64]struct{}, N) + validSig := 0 + for _, sig := range vd.LastDecisionSignatures { + if _, exist := nodesMap[sig.Signer]; exist { + continue // seen signature from this node already + } + nodesMap[sig.Signer] = struct{}{} + signature := types.Signature{ + ID: sig.Signer, + Value: sig.Value, + Msg: sig.Msg, + } + proposal := types.Proposal{ + Header: vd.LastDecision.Header, + Payload: vd.LastDecision.Payload, + Metadata: vd.LastDecision.Metadata, + VerificationSequence: int64(vd.LastDecision.VerificationSequence), + } + if _, err := verifier.VerifyConsenterSig(signature, proposal); err != nil { + return 0, errors.Errorf("last decision signature is invalid, error: %v", err) + } + validSig++ + } + if validSig < quorum { + return 0, errors.Errorf("there are only %d valid last decision signatures", validSig) + } + return md.LatestSequence, nil +} + +// ValidateInFlight validates the given in-flight proposal +func ValidateInFlight(inFlightProposal *protos.Proposal, lastSequence uint64) error { + if inFlightProposal == nil { + return nil + } + if inFlightProposal.Metadata == nil { + return errors.Errorf("in flight proposal metadata is nil") + } + inFlightMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(inFlightProposal.Metadata, inFlightMetadata); err != nil { + return errors.Errorf("unable to unmarshal the in flight proposal metadata, err: %v", err) + } + if inFlightMetadata.LatestSequence != lastSequence+1 { + return errors.Errorf("the in flight proposal sequence is %d while the last decision sequence is %d", inFlightMetadata.LatestSequence, lastSequence) + } + return nil +} + +func (v *ViewChanger) processViewDataMsg() { + if len(v.viewDataMsgs.voted) < v.quorum { + return // need enough (quorum) data to continue + } + v.Logger.Debugf("Node %d got a quorum of 
viewData messages", v.SelfID) + ok, _, _, err := CheckInFlight(v.getViewDataMessages(), v.f, v.quorum, v.N, v.Verifier) + if err != nil { + v.Logger.Panicf("Node %d checked the in flight and it got an error: %v", v.SelfID, err) + } + if !ok { + v.Logger.Debugf("Node %d checked the in flight and it was invalid", v.SelfID) + return + } + v.Logger.Debugf("Node %d checked the in flight and it was valid", v.SelfID) + // create the new view message + var signedMsgs []*protos.SignedViewData + myMsg := v.prepareViewDataMsg() // since it might have changed by now + signedMsgs = append(signedMsgs, myMsg.GetViewData()) // leader's message will always be the first + close(v.viewDataMsgs.votes) + for vote := range v.viewDataMsgs.votes { + if vote.sender == v.SelfID { + continue // ignore my old message + } + signedMsgs = append(signedMsgs, vote.GetViewData()) + } + msg := &protos.Message{ + Content: &protos.Message_NewView{ + NewView: &protos.NewView{ + SignedViewData: signedMsgs, + }, + }, + } + v.Logger.Debugf("Node %d is broadcasting a new view msg", v.SelfID) + v.Comm.BroadcastConsensus(msg) + v.Logger.Debugf("Node %d sent a new view msg to self", v.SelfID) + v.processMsg(v.SelfID, msg) // also send to myself // TODO consider not reprocessing this message + v.viewDataMsgs.clear(v.N) + v.Logger.Debugf("Node %d sent a new view msg", v.SelfID) +} + +// returns view data messages included in votes +func (v *ViewChanger) getViewDataMessages() []*protos.ViewData { + num := len(v.viewDataMsgs.votes) + var messages []*protos.ViewData + for i := 0; i < num; i++ { + vote := <-v.viewDataMsgs.votes + vd := &protos.ViewData{} + if err := proto.Unmarshal(vote.GetViewData().RawViewData, vd); err != nil { + v.Logger.Panicf("Node %d was unable to unmarshal viewData message, error: %v", v.SelfID, err) + } + messages = append(messages, vd) + v.viewDataMsgs.votes <- vote + } + return messages +} + +type possibleProposal struct { + proposal *protos.Proposal + preprepared int + noArgument int +} 
+ +type proposalAndMetadata struct { + proposal *protos.Proposal + metadata *protos.ViewMetadata +} + +// CheckInFlight checks if there is an in-flight proposal that needs to be decided on (because a node might decided on it already) +func CheckInFlight(messages []*protos.ViewData, f int, quorum int, N uint64, verifier api.Verifier) (ok, noInFlight bool, inFlightProposal *protos.Proposal, err error) { + expectedSequence := maxLastDecisionSequence(messages) + 1 + possibleProposals := make([]*possibleProposal, 0) + proposalsAndMetadata := make([]*proposalAndMetadata, 0) + noInFlightCount := 0 + for _, vd := range messages { + + if vd.InFlightProposal == nil { // there is no in flight proposal here + noInFlightCount++ + proposalsAndMetadata = append(proposalsAndMetadata, &proposalAndMetadata{nil, nil}) + continue + } + + if vd.InFlightProposal.Metadata == nil { // should have been validated earlier + return false, false, nil, errors.Errorf("Node has a view data message where the in flight proposal metadata is nil") + } + + inFlightMetadata := &protos.ViewMetadata{} + if err := proto.Unmarshal(vd.InFlightProposal.Metadata, inFlightMetadata); err != nil { // should have been validated earlier + return false, false, nil, errors.Errorf("Node was unable to unmarshal the in flight proposal metadata, error: %v", err) + } + + proposalsAndMetadata = append(proposalsAndMetadata, &proposalAndMetadata{vd.InFlightProposal, inFlightMetadata}) + + if inFlightMetadata.LatestSequence != expectedSequence { // the in flight proposal sequence is not as expected + noInFlightCount++ + continue + } + + // now the in flight proposal is with the expected sequence + // find possible proposals + + if !vd.InFlightPrepared { // no prepared so isn't a possible proposal + noInFlightCount++ + continue + } + + // this proposal is prepared and so it is possible + alreadyExists := false + for _, p := range possibleProposals { + if proto.Equal(p.proposal, vd.InFlightProposal) { + alreadyExists = true + 
break + } + } + if !alreadyExists { + // this is not a proposal we have seen before + possibleProposals = append(possibleProposals, &possibleProposal{proposal: vd.InFlightProposal}) + } + } + + // fill out info on all possible proposals + for _, prop := range proposalsAndMetadata { + for _, possible := range possibleProposals { + + if prop.proposal == nil { + possible.noArgument++ + continue + } + + if prop.metadata.LatestSequence != expectedSequence { + possible.noArgument++ + continue + } + + if proto.Equal(prop.proposal, possible.proposal) { + possible.noArgument++ + possible.preprepared++ + } + + } + } + + // see if there is an in flight proposal that is agreed on + agreed := -1 + for i, possible := range possibleProposals { + if possible.preprepared < f+1 { // condition A2 doesn't hold + continue + } + if possible.noArgument < quorum { // condition A1 doesn't hold + continue + } + agreed = i + break + } + + // condition A holds + if agreed != -1 { + return true, false, possibleProposals[agreed].proposal, nil + } + + // condition B holds + if noInFlightCount >= quorum { // there is a quorum of messages that support that there is no prepared in flight proposal + return true, true, nil, nil + } + + return false, false, nil, nil +} + +// returns the highest sequence of a last decision within the given view data messages +func maxLastDecisionSequence(messages []*protos.ViewData) uint64 { + max := uint64(0) + for _, vd := range messages { + if vd.LastDecision == nil { + panic(fmt.Sprintf("The last decision is not set")) + } + if vd.LastDecision.Metadata == nil { // this is a genesis proposal + continue + } + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(vd.LastDecision.Metadata, md); err != nil { + panic(fmt.Sprintf("Unable to unmarshal the last decision metadata, err: %v", err)) + } + if md.LatestSequence > max { + max = md.LatestSequence + } + } + return max +} + +func (v *ViewChanger) validateNewViewMsg(msg *protos.NewView) (valid bool, sync bool, 
deliver bool) { + signed := msg.GetSignedViewData() + nodesMap := make(map[uint64]struct{}, v.N) + validViewDataMsgs := 0 + mySequence, myLastDecision := v.extractCurrentSequence() + for _, svd := range signed { + if _, exist := nodesMap[svd.Signer]; exist { + continue // seen data from this node already + } + nodesMap[svd.Signer] = struct{}{} + + vd := &protos.ViewData{} + if err := proto.Unmarshal(svd.RawViewData, vd); err != nil { + v.Logger.Errorf("Node %d was unable to unmarshal viewData from the newView message, error: %v", v.SelfID, err) + return false, false, false + } + + if vd.NextView != v.currView { + v.Logger.Warnf("Node %d is processing newView message, but nextView of %s is %d, while the currView is %d", v.SelfID, signedViewDataToString(svd), vd.NextView, v.currView) + return false, false, false + } + + if vd.LastDecision == nil { + v.Logger.Warnf("Node %d is processing newView message, but the last decision of %s is not set", v.SelfID, signedViewDataToString(svd)) + return false, false, false + } + + // Begin to check the last decision within the view data message. + // + // This node might be ahead, in which case it might not have the right config to validate + // the decision and signatures, and so the view data message is deemed invalid. + // + // If this node is too far behind then it needs to sync. + // No validation can be done since it might not have the appropriate config. + // + // If the last decision sequence is equal to this node's sequence then we check that the decisions are equal. + // However, we cannot validate the decision signatures since this last decision might have been a reconfig. + // + // Lastly, this node is behind by one sequence, and so it validates the decision and delivers it. + // Only after delivery the message signature is verified, again since this decision might have been a reconfig. 
+ + if vd.LastDecision.Metadata == nil { // this is a genesis proposal + if mySequence > 0 { + // can't validate the signature since I am ahead + if err := ValidateInFlight(vd.InFlightProposal, 0); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the in flight proposal of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + validViewDataMsgs++ + continue + } + if err := v.Verifier.VerifySignature(types.Signature{ID: svd.Signer, Value: svd.Signature, Msg: svd.RawViewData}); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but signature of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + if err := ValidateInFlight(vd.InFlightProposal, 0); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the in flight proposal of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + validViewDataMsgs++ + continue + } + + lastDecisionMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(vd.LastDecision.Metadata, lastDecisionMD); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but was unable to unmarshal the last decision of %s, err: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + if lastDecisionMD.ViewId >= vd.NextView { + v.Logger.Warnf("Node %d is processing newView message, but the last decision view %d is greater or equal to requested next view %d of %s", v.SelfID, lastDecisionMD.ViewId, vd.NextView, signedViewDataToString(svd)) + return false, false, false + } + + if lastDecisionMD.LatestSequence > mySequence+1 { // this is a decision in the future, can't verify it and should sync + v.Synchronizer.Sync() // TODO check if I manged to sync to latest decision, revalidate new view, and join the other nodes + return true, true, false + } + + if lastDecisionMD.LatestSequence < mySequence { // this 
is a decision in the past + // can't validate the signature since I am ahead + if err := ValidateInFlight(vd.InFlightProposal, lastDecisionMD.LatestSequence); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the in flight proposal of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + validViewDataMsgs++ + continue + } + + if lastDecisionMD.LatestSequence == mySequence { // just make sure that we have the same last decision, can't verify the signatures of this last decision since this might have been a reconfiguration + // the signature on this message can be verified + if err := v.Verifier.VerifySignature(types.Signature{ID: svd.Signer, Value: svd.Signature, Msg: svd.RawViewData}); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but signature of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + + // compare the last decision itself + if !proto.Equal(vd.LastDecision, myLastDecision) { + v.Logger.Warnf("Node %d is processing newView message, but the last decision of %s is with the same sequence but is not equal", v.SelfID, signedViewDataToString(svd)) + return false, false, false + } + + if err := ValidateInFlight(vd.InFlightProposal, lastDecisionMD.LatestSequence); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the in flight proposal of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + + validViewDataMsgs++ + continue + } + + if lastDecisionMD.LatestSequence != mySequence+1 { + v.Logger.Warnf("Node %d is processing newView message, but the last decision sequence is not equal to this node's sequence + 1", v.SelfID) + return false, false, false + } + + _, err := ValidateLastDecision(vd, v.quorum, v.N, v.Verifier) + if err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the last decision of %s is invalid, 
reason: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + + proposal := types.Proposal{ + Header: vd.LastDecision.Header, + Metadata: vd.LastDecision.Metadata, + Payload: vd.LastDecision.Payload, + VerificationSequence: int64(vd.LastDecision.VerificationSequence), + } + signatures := make([]types.Signature, 0) + for _, sig := range vd.LastDecisionSignatures { + signature := types.Signature{ + ID: sig.Signer, + Value: sig.Value, + Msg: sig.Msg, + } + signatures = append(signatures, signature) + } + v.deliverDecision(proposal, signatures) + + select { // if there was a delivery with a reconfig we need to stop here before verify signature + case <-v.stopChan: + return false, false, false + default: + } + + if err := v.Verifier.VerifySignature(types.Signature{ID: svd.Signer, Value: svd.Signature, Msg: svd.RawViewData}); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but signature of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + + if err := ValidateInFlight(vd.InFlightProposal, lastDecisionMD.LatestSequence); err != nil { + v.Logger.Warnf("Node %d is processing newView message, but the in flight proposal of %s is invalid, error: %v", v.SelfID, signedViewDataToString(svd), err) + return false, false, false + } + + return true, false, true + } + + if validViewDataMsgs < v.quorum { + v.Logger.Warnf("Node %d is processing newView message, but there was only %d valid view data messages while the quorum is %d", v.SelfID, validViewDataMsgs, v.quorum) + return false, false, false + } + + v.Logger.Debugf("Node %d found a quorum of valid view data messages within the new view message", v.SelfID) + return true, false, false +} + +func (v *ViewChanger) extractViewDataMessages(msg *protos.NewView) []*protos.ViewData { + signed := msg.GetSignedViewData() + vds := make([]*protos.ViewData, 0) + for _, svd := range signed { + vd := &protos.ViewData{} + if err := 
proto.Unmarshal(svd.RawViewData, vd); err != nil { + v.Logger.Panicf("Node %d was unable to unmarshal viewData from the newView message, error: %v", v.SelfID, err) + } + vds = append(vds, vd) + } + return vds +} + +func (v *ViewChanger) processNewViewMsg(msg *protos.NewView) { + valid, calledSync, calledDeliver := v.validateNewViewMsg(msg) + for calledDeliver { + v.Logger.Debugf("Node %d is processing a newView message, and delivered a proposal", v.SelfID) + valid, calledSync, calledDeliver = v.validateNewViewMsg(msg) + } + if !valid { + v.Logger.Warnf("Node %d is processing a newView message, but the message is invalid", v.SelfID) + return + } + if calledSync { + v.Logger.Debugf("Node %d is processing a newView message, and requested a sync", v.SelfID) + return + } + + ok, noInFlight, inFlightProposal, err := CheckInFlight(v.extractViewDataMessages(msg), v.f, v.quorum, v.N, v.Verifier) + if err != nil { + v.Logger.Panicf("The check of the in flight proposal by node %d returned an error: %v", v.SelfID, err) + } + if !ok { + v.Logger.Debugf("The check of the in flight proposal by node %d did not pass", v.SelfID) + return + } + + if !noInFlight && !v.commitInFlightProposal(inFlightProposal) { + v.Logger.Warnf("Node %d was unable to commit the in flight proposal, not changing the view", v.SelfID) + return + } + + mySequence, _ := v.extractCurrentSequence() + + newViewToSave := &protos.SavedMessage{ + Content: &protos.SavedMessage_NewView{ + NewView: &protos.ViewMetadata{ + ViewId: v.currView, + LatestSequence: mySequence, + }, + }, + } + if err := v.State.Save(newViewToSave); err != nil { + v.Logger.Panicf("Failed to save message to state, error: %v", err) + } + + select { // if there was a delivery or sync with a reconfig when committing the in-flight proposal we should stop + case <-v.stopChan: + return + default: + } + + v.Controller.ViewChanged(v.currView, mySequence+1) + + v.RequestsTimer.RestartTimers() + v.checkTimeout = false + v.backOffFactor = 1 // reset + 
+} + +func (v *ViewChanger) deliverDecision(proposal types.Proposal, signatures []types.Signature) { + v.Logger.Debugf("Delivering to app the last decision proposal") + reconfig := v.Application.Deliver(proposal, signatures) + if reconfig.InLatestDecision { + v.close() + } + v.Checkpoint.Set(proposal, signatures) + requests := v.Verifier.RequestsFromProposal(proposal) + for _, reqInfo := range requests { + if err := v.RequestsTimer.RemoveRequest(reqInfo); err != nil { + v.Logger.Warnf("Error during remove of request %s from the pool, err: %v", reqInfo, err) + } + } + v.Pruner.MaybePruneRevokedRequests() +} + +func (v *ViewChanger) commitInFlightProposal(proposal *protos.Proposal) (success bool) { + myLastDecision, _ := v.Checkpoint.Get() + if proposal == nil { + v.Logger.Panicf("The in flight proposal is nil") + } + proposalMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(proposal.Metadata, proposalMD); err != nil { + v.Logger.Panicf("Node %d is unable to unmarshal the in flight proposal metadata, err: %v", v.SelfID, err) + } + + if myLastDecision.Metadata != nil { // if metadata is nil then I am at genesis proposal and I should commit the in flight proposal anyway + lastDecisionMD := &protos.ViewMetadata{} + if err := proto.Unmarshal(myLastDecision.Metadata, lastDecisionMD); err != nil { + v.Logger.Panicf("Node %d is unable to unmarshal its own last decision metadata from checkpoint, err: %v", v.SelfID, err) + } + if lastDecisionMD.LatestSequence == proposalMD.LatestSequence { + v.Logger.Debugf("Node %d already decided on sequence %d and so it will not commit the in flight proposal with the same sequence", v.SelfID, lastDecisionMD.LatestSequence) + v.Logger.Debugf("Node %d is comparing its last decision with the in flight proposal with the same sequence", v.SelfID, lastDecisionMD.LatestSequence) + if !proto.Equal(&myLastDecision, proposal) { + v.Logger.Warnf("Node %d compared its last decision with the in flight proposal, which has the same sequence, but 
they are not equal", v.SelfID) + return false + } + return true // I already decided on the in flight proposal + } + if lastDecisionMD.LatestSequence != proposalMD.LatestSequence-1 { + v.Logger.Panicf("Node %d got an in flight proposal with sequence %d while its last decision was on sequence %d", v.SelfID, proposalMD.LatestSequence, lastDecisionMD.LatestSequence) + } + } + + v.Logger.Debugf("Node %d is creating a view for the in flight proposal", v.SelfID) + + v.inFlightViewLock.Lock() + v.inFlightView = &View{ + RetrieveCheckpoint: v.Checkpoint.Get, + DecisionsPerLeader: v.DecisionsPerLeader, + SelfID: v.SelfID, + N: v.N, + Number: proposalMD.ViewId, + LeaderID: v.SelfID, // so that no byzantine leader will cause a complain + Quorum: v.quorum, + Decider: v, + FailureDetector: v, + Sync: v, + Logger: v.Logger, + Comm: v.Comm, + Verifier: v.Verifier, + Signer: v.Signer, + ProposalSequence: proposalMD.LatestSequence, + State: v.State, + InMsgQSize: v.InMsqQSize, + ViewSequences: v.ViewSequences, + Phase: PREPARED, + } + + v.inFlightView.inFlightProposal = &types.Proposal{ + VerificationSequence: int64(proposal.VerificationSequence), + Metadata: proposal.Metadata, + Payload: proposal.Payload, + Header: proposal.Header, + } + v.inFlightView.myProposalSig = v.Signer.SignProposal(*v.inFlightView.inFlightProposal, nil) + v.inFlightView.lastBroadcastSent = &protos.Message{ + Content: &protos.Message_Commit{ + Commit: &protos.Commit{ + View: v.inFlightView.Number, + Digest: v.inFlightView.inFlightProposal.Digest(), + Seq: v.inFlightView.ProposalSequence, + Signature: &protos.Signature{ + Signer: v.inFlightView.myProposalSig.ID, + Value: v.inFlightView.myProposalSig.Value, + Msg: v.inFlightView.myProposalSig.Msg, + }, + }, + }, + } + + v.inFlightView.Start() + v.inFlightViewLock.Unlock() + + v.Logger.Debugf("Node %d started a view for the in flight proposal", v.SelfID) + + select { // wait for view to finish + case <-v.inFlightDecideChan: + case <-v.inFlightSyncChan: + case 
<-v.stopChan: + case now := <-v.Ticker: + v.lastTick = now + v.checkIfTimeout(now) + } + + v.inFlightView.Abort() + return true +} + +// Decide delivers to the application and informs the view changer after delivery +func (v *ViewChanger) Decide(proposal types.Proposal, signatures []types.Signature, requests []types.RequestInfo) { + v.inFlightView.stop() + v.Logger.Debugf("Delivering to app the last decision proposal") + reconfig := v.Application.Deliver(proposal, signatures) + if reconfig.InLatestDecision { + v.close() + } + v.Checkpoint.Set(proposal, signatures) + for _, reqInfo := range requests { + if err := v.RequestsTimer.RemoveRequest(reqInfo); err != nil { + v.Logger.Warnf("Error during remove of request %s from the pool, err: %v", reqInfo, err) + } + } + v.Pruner.MaybePruneRevokedRequests() + v.inFlightDecideChan <- struct{}{} +} + +// Complain panics when a view change is requested +func (v *ViewChanger) Complain(viewNum uint64, stopView bool) { + v.Logger.Panicf("Node %d has complained while in the view for the in flight proposal", v.SelfID) +} + +// Sync calls the synchronizer and informs the view changer of the sync +func (v *ViewChanger) Sync() { + // the in flight proposal view asked to sync + v.Logger.Debugf("Node %d is calling sync because the in flight proposal view has asked to sync", v.SelfID) + v.Synchronizer.Sync() + v.inFlightSyncChan <- struct{}{} +} + +// HandleViewMessage passes a message to the in flight proposal view if applicable +func (v *ViewChanger) HandleViewMessage(sender uint64, m *protos.Message) { + v.inFlightViewLock.RLock() + defer v.inFlightViewLock.RUnlock() + if view := v.inFlightView; view != nil { + v.Logger.Debugf("Node %d is passing a message to the in flight view", v.SelfID) + view.HandleMessage(sender, m) + } +} + +func (v *ViewChanger) blacklist() []uint64 { + prop, _ := v.Checkpoint.Get() + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(prop.Metadata, md); err != nil { + v.Logger.Panicf("Failed 
unmarshalling metadata: %v", err) + } + return md.BlackList +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/pkg/api/dependencies.go b/vendor/github.com/SmartBFT-Go/consensus/pkg/api/dependencies.go new file mode 100644 index 00000000000..cfab37e929b --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/pkg/api/dependencies.go @@ -0,0 +1,99 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package api + +import ( + bft "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" +) + +// Application delivers the consented proposal and corresponding signatures. +type Application interface { + // Deliver delivers the given proposal and signatures. + // After the call returns we assume that this proposal is stored in persistent memory. + // It returns whether this proposal was a reconfiguration and the current config. + Deliver(proposal bft.Proposal, signature []bft.Signature) bft.Reconfig +} + +// Comm enables the communications between the nodes. +type Comm interface { + // SendConsensus sends the consensus protocol related message m to the node with id targetID. + SendConsensus(targetID uint64, m *protos.Message) + // SendTransaction sends the given client's request to the node with id targetID. + SendTransaction(targetID uint64, request []byte) + // Nodes returns a set of ids of participating nodes. + // In case you need to change or keep this slice, create a copy. + Nodes() []uint64 +} + +// Assembler creates proposals. +type Assembler interface { + // AssembleProposal creates a proposal which includes + // the given requests (when permitting) and metadata. + AssembleProposal(metadata []byte, requests [][]byte) bft.Proposal +} + +// WriteAheadLog is a write ahead log. +type WriteAheadLog interface { + // Append appends a data item to the end of the WAL + // and indicate whether this entry is a truncation point. 
+ Append(entry []byte, truncateTo bool) error
+}
+
+// Signer signs on the given data.
+type Signer interface {
+ // Sign signs on the given data and returns the signature.
+ Sign([]byte) []byte
+ // SignProposal signs on the given proposal and returns a composite Signature.
+ SignProposal(proposal bft.Proposal, auxiliaryInput []byte) *bft.Signature
+}
+
+// Verifier validates data and verifies signatures.
+type Verifier interface {
+ // VerifyProposal verifies the given proposal and returns the included requests' info.
+ VerifyProposal(proposal bft.Proposal) ([]bft.RequestInfo, error)
+ // VerifyRequest verifies the given request and returns its info.
+ VerifyRequest(val []byte) (bft.RequestInfo, error)
+ // VerifyConsenterSig verifies the signature for the given proposal.
+ // It returns the auxiliary data in the signature.
+ VerifyConsenterSig(signature bft.Signature, prop bft.Proposal) ([]byte, error)
+ // VerifySignature verifies the signature.
+ VerifySignature(signature bft.Signature) error
+ // VerificationSequence returns the current verification sequence.
+ VerificationSequence() uint64
+ // RequestsFromProposal returns from the given proposal the included requests' info.
+ RequestsFromProposal(proposal bft.Proposal) []bft.RequestInfo
+ // AuxiliaryData extracts the auxiliary data from a signature's message.
+ AuxiliaryData([]byte) []byte
+}
+
+// MembershipNotifier notifies if there was a membership change in the last proposal.
+type MembershipNotifier interface {
+ // MembershipChange returns true if there was a membership change in the last proposal.
+ MembershipChange() bool
+}
+
+// RequestInspector extracts info (i.e. request id and client id) from a given request.
+type RequestInspector interface {
+ // RequestID returns info about the given request.
+ RequestID(req []byte) bft.RequestInfo
+}
+
+// Synchronizer reaches the cluster nodes and fetches blocks in order to sync the replica's state.
+type Synchronizer interface { + // Sync blocks indefinitely until the replica's state is synchronized to the latest decision, + // and returns it with info about reconfiguration. + Sync() bft.SyncResponse +} + +// Logger defines the contract for logging. +type Logger interface { + Debugf(template string, args ...interface{}) + Infof(template string, args ...interface{}) + Errorf(template string, args ...interface{}) + Warnf(template string, args ...interface{}) + Panicf(template string, args ...interface{}) +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/pkg/consensus/consensus.go b/vendor/github.com/SmartBFT-Go/consensus/pkg/consensus/consensus.go new file mode 100644 index 00000000000..fd0c8063496 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/pkg/consensus/consensus.go @@ -0,0 +1,487 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package consensus + +import ( + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + + algorithm "github.com/SmartBFT-Go/consensus/internal/bft" + bft "github.com/SmartBFT-Go/consensus/pkg/api" + "github.com/SmartBFT-Go/consensus/pkg/types" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/pkg/errors" +) + +// Consensus submits requests to be total ordered, +// and delivers to the application proposals by invoking Deliver() on it. +// The proposals contain batches of requests assembled together by the Assembler. 
+type Consensus struct { + Config types.Configuration + Application bft.Application + Assembler bft.Assembler + WAL bft.WriteAheadLog + WALInitialContent [][]byte + Comm bft.Comm + Signer bft.Signer + Verifier bft.Verifier + MembershipNotifier bft.MembershipNotifier + RequestInspector bft.RequestInspector + Synchronizer bft.Synchronizer + Logger bft.Logger + Metadata protos.ViewMetadata + LastProposal types.Proposal + LastSignatures []types.Signature + Scheduler <-chan time.Time + ViewChangerTicker <-chan time.Time + + submittedChan chan struct{} + inFlight *algorithm.InFlightData + checkpoint *types.Checkpoint + Pool *algorithm.Pool + viewChanger *algorithm.ViewChanger + controller *algorithm.Controller + collector *algorithm.StateCollector + state *algorithm.PersistedState + numberOfNodes uint64 + nodes []uint64 + nodeMap sync.Map + + consensusDone sync.WaitGroup + stopOnce sync.Once + stopChan chan struct{} + + consensusLock sync.RWMutex + + reconfigChan chan types.Reconfig + + running uint64 +} + +func (c *Consensus) Complain(viewNum uint64, stopView bool) { + c.consensusLock.RLock() + defer c.consensusLock.RUnlock() + c.viewChanger.StartViewChange(viewNum, stopView) +} + +func (c *Consensus) Deliver(proposal types.Proposal, signatures []types.Signature) types.Reconfig { + reconfig := c.Application.Deliver(proposal, signatures) + if reconfig.InLatestDecision { + c.Logger.Debugf("Detected a reconfig in deliver") + c.reconfigChan <- reconfig + } + return reconfig +} + +func (c *Consensus) Sync() types.SyncResponse { + syncResponse := c.Synchronizer.Sync() + if syncResponse.Reconfig.InReplicatedDecisions { + c.Logger.Debugf("Detected a reconfig in sync") + c.reconfigChan <- types.Reconfig{ + InLatestDecision: true, + CurrentNodes: syncResponse.Reconfig.CurrentNodes, + CurrentConfig: syncResponse.Reconfig.CurrentConfig, + } + } + return syncResponse +} + +// GetLeaderID returns the current leader ID or zero if Consensus is not running +func (c *Consensus) 
GetLeaderID() uint64 { + if atomic.LoadUint64(&c.running) == 0 { + return 0 + } + return c.controller.GetLeaderID() +} + +func (c *Consensus) Start() error { + if err := c.ValidateConfiguration(c.Comm.Nodes()); err != nil { + return errors.Wrapf(err, "configuration is invalid") + } + + c.consensusDone.Add(1) + c.stopOnce = sync.Once{} + c.stopChan = make(chan struct{}) + c.reconfigChan = make(chan types.Reconfig) + c.consensusLock.Lock() + defer c.consensusLock.Unlock() + + c.setNodes(c.Comm.Nodes()) + + c.inFlight = &algorithm.InFlightData{} + + c.state = &algorithm.PersistedState{ + InFlightProposal: c.inFlight, + Entries: c.WALInitialContent, + Logger: c.Logger, + WAL: c.WAL, + } + + c.checkpoint = &types.Checkpoint{} + c.checkpoint.Set(c.LastProposal, c.LastSignatures) + + c.createComponents() + opts := algorithm.PoolOptions{ + QueueSize: int64(c.Config.RequestPoolSize), + ForwardTimeout: c.Config.RequestForwardTimeout, + ComplainTimeout: c.Config.RequestComplainTimeout, + AutoRemoveTimeout: c.Config.RequestAutoRemoveTimeout, + } + c.submittedChan = make(chan struct{}, 1) + c.Pool = algorithm.NewPool(c.Logger, c.RequestInspector, c.controller, opts, c.submittedChan) + c.continueCreateComponents() + + c.Logger.Debugf("Application started with view %d, seq %d, and decisions %d", c.Metadata.ViewId, c.Metadata.LatestSequence, c.Metadata.DecisionsInView) + view, seq, dec := c.setViewAndSeq(c.Metadata.ViewId, c.Metadata.LatestSequence, c.Metadata.DecisionsInView) + + c.waitForEachOther() + + go c.run() + + c.startComponents(view, seq, dec, true) + + atomic.StoreUint64(&c.running, 1) + + return nil +} + +func (c *Consensus) run() { + defer func() { + c.Logger.Infof("Exiting") + atomic.StoreUint64(&c.running, 0) + c.Stop() + }() + + defer c.consensusDone.Done() + + for { + select { + case reconfig := <-c.reconfigChan: + c.reconfig(reconfig) + case <-c.stopChan: + return + } + } +} + +func (c *Consensus) reconfig(reconfig types.Reconfig) { + c.Logger.Debugf("Starting 
reconfig") + c.consensusLock.Lock() + defer c.consensusLock.Unlock() + + // make sure all components are stopped + c.viewChanger.Stop() + c.controller.StopWithPoolPause() + c.collector.Stop() + + var exist bool + for _, n := range reconfig.CurrentNodes { + if c.Config.SelfID == n { + exist = true + break + } + } + + if !exist { + c.Logger.Infof("Evicted in reconfiguration, shutting down") + c.close() + return + } + + c.Config = reconfig.CurrentConfig + if err := c.ValidateConfiguration(reconfig.CurrentNodes); err != nil { + if strings.Contains(err.Error(), "nodes does not contain the SelfID") { + c.close() + c.Logger.Infof("Closing consensus since this node is not in the current set of nodes") + return + } else { + c.Logger.Panicf("Configuration is invalid, error: %v", err) + } + } + + c.setNodes(reconfig.CurrentNodes) + + c.createComponents() + opts := algorithm.PoolOptions{ + ForwardTimeout: c.Config.RequestForwardTimeout, + ComplainTimeout: c.Config.RequestComplainTimeout, + AutoRemoveTimeout: c.Config.RequestAutoRemoveTimeout, + } + c.Pool.ChangeTimeouts(c.controller, opts) // TODO handle reconfiguration of queue size in the pool + c.continueCreateComponents() + + proposal, _ := c.checkpoint.Get() + md := &protos.ViewMetadata{} + if err := proto.Unmarshal(proposal.Metadata, md); err != nil { + c.Logger.Panicf("Couldn't unmarshal the checkpoint metadata, error: %v", err) + } + c.Logger.Debugf("Checkpoint with view %d and seq %d", md.ViewId, md.LatestSequence) + + view, seq, dec := c.setViewAndSeq(md.ViewId, md.LatestSequence, md.DecisionsInView) + + c.waitForEachOther() + + c.startComponents(view, seq, dec, false) + + c.Pool.RestartTimers() + + c.Logger.Debugf("Reconfig is done") +} + +func (c *Consensus) close() { + c.stopOnce.Do( + func() { + select { + case <-c.stopChan: + return + default: + close(c.stopChan) + } + }, + ) +} + +func (c *Consensus) Stop() { + c.consensusLock.RLock() + c.viewChanger.Stop() + c.controller.Stop() + c.collector.Stop() + 
c.consensusLock.RUnlock() + c.close() + c.consensusDone.Wait() +} + +func (c *Consensus) HandleMessage(sender uint64, m *protos.Message) { + if _, exists := c.nodeMap.Load(sender); !exists { + c.Logger.Warnf("Received message from unexpected node %d", sender) + return + } + c.consensusLock.RLock() + defer c.consensusLock.RUnlock() + c.controller.ProcessMessages(sender, m) +} + +func (c *Consensus) HandleRequest(sender uint64, req []byte) { + c.consensusLock.RLock() + defer c.consensusLock.RUnlock() + c.controller.HandleRequest(sender, req) +} + +func (c *Consensus) SubmitRequest(req []byte) error { + c.consensusLock.RLock() + defer c.consensusLock.RUnlock() + if c.GetLeaderID() == 0 { + return errors.Errorf("no leader") + } + c.Logger.Debugf("Submit Request: %s", c.RequestInspector.RequestID(req)) + return c.controller.SubmitRequest(req) +} + +func (c *Consensus) proposalMaker() *algorithm.ProposalMaker { + return &algorithm.ProposalMaker{ + DecisionsPerLeader: c.Config.DecisionsPerLeader, + Checkpoint: c.checkpoint, + State: c.state, + Comm: c.controller, + Decider: c.controller, + Logger: c.Logger, + Signer: c.Signer, + MembershipNotifier: c.MembershipNotifier, + SelfID: c.Config.SelfID, + Sync: c.controller, + FailureDetector: c, + Verifier: c.Verifier, + N: c.numberOfNodes, + InMsqQSize: int(c.Config.IncomingMessageBufferSize), + ViewSequences: c.controller.ViewSequences, + } +} + +func (c *Consensus) ValidateConfiguration(nodes []uint64) error { + if err := c.Config.Validate(); err != nil { + return errors.Wrap(err, "bad configuration") + } + + nodeSet := make(map[uint64]bool) + for _, val := range nodes { + if val == 0 { + return errors.Errorf("nodes contains node id 0 which is not permitted, nodes: %v", nodes) + } + nodeSet[val] = true + } + + if !nodeSet[c.Config.SelfID] { + return errors.Errorf("nodes does not contain the SelfID: %d, nodes: %v", c.Config.SelfID, nodes) + } + + if len(nodeSet) != len(nodes) { + return errors.Errorf("nodes contains duplicate 
IDs, nodes: %v", nodes) + } + + return nil +} + +func (c *Consensus) setNodes(nodes []uint64) { + for _, n := range c.nodes { + c.nodeMap.Delete(n) + } + + c.numberOfNodes = uint64(len(nodes)) + c.nodes = sortNodes(nodes) + for _, n := range nodes { + c.nodeMap.Store(n, struct{}{}) + } +} + +func sortNodes(nodes []uint64) []uint64 { + sorted := make([]uint64, len(nodes)) + copy(sorted, nodes) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i] < sorted[j] + }) + return sorted +} + +func (c *Consensus) createComponents() { + c.viewChanger = &algorithm.ViewChanger{ + SelfID: c.Config.SelfID, + N: c.numberOfNodes, + NodesList: c.nodes, + LeaderRotation: c.Config.LeaderRotation, + DecisionsPerLeader: c.Config.DecisionsPerLeader, + SpeedUpViewChange: c.Config.SpeedUpViewChange, + Logger: c.Logger, + Signer: c.Signer, + Verifier: c.Verifier, + Application: c, + Checkpoint: c.checkpoint, + InFlight: c.inFlight, + State: c.state, + // Controller later + // RequestsTimer later + Ticker: c.ViewChangerTicker, + ResendTimeout: c.Config.ViewChangeResendInterval, + ViewChangeTimeout: c.Config.ViewChangeTimeout, + InMsqQSize: int(c.Config.IncomingMessageBufferSize), + } + + c.collector = &algorithm.StateCollector{ + SelfID: c.Config.SelfID, + N: c.numberOfNodes, + Logger: c.Logger, + CollectTimeout: c.Config.CollectTimeout, + } + + c.controller = &algorithm.Controller{ + Checkpoint: c.checkpoint, + WAL: c.WAL, + ID: c.Config.SelfID, + N: c.numberOfNodes, + NodesList: c.nodes, + LeaderRotation: c.Config.LeaderRotation, + DecisionsPerLeader: c.Config.DecisionsPerLeader, + Verifier: c.Verifier, + Logger: c.Logger, + Assembler: c.Assembler, + Application: c, + FailureDetector: c, + Synchronizer: c, + Comm: c.Comm, + Signer: c.Signer, + RequestInspector: c.RequestInspector, + ViewChanger: c.viewChanger, + ViewSequences: &atomic.Value{}, + Collector: c.collector, + State: c.state, + } + + c.viewChanger.Comm = c.controller + c.viewChanger.Synchronizer = c.controller + + 
c.controller.ProposerBuilder = c.proposalMaker() +} + +func (c *Consensus) continueCreateComponents() { + batchBuilder := algorithm.NewBatchBuilder(c.Pool, c.submittedChan, c.Config.RequestBatchMaxCount, c.Config.RequestBatchMaxBytes, c.Config.RequestBatchMaxInterval) + leaderMonitor := algorithm.NewHeartbeatMonitor(c.Scheduler, c.Logger, c.Config.LeaderHeartbeatTimeout, c.Config.LeaderHeartbeatCount, c.controller, c.numberOfNodes, c.controller, c.controller.ViewSequences, c.Config.NumOfTicksBehindBeforeSyncing) + c.controller.RequestPool = c.Pool + c.controller.Batcher = batchBuilder + c.controller.LeaderMonitor = leaderMonitor + + c.viewChanger.Controller = c.controller + c.viewChanger.Pruner = c.controller + c.viewChanger.RequestsTimer = c.Pool + c.viewChanger.ViewSequences = c.controller.ViewSequences +} + +func (c *Consensus) setViewAndSeq(view, seq, dec uint64) (newView, newSeq, newDec uint64) { + newView = view + newSeq = seq + // decisions in view is incremented after delivery, + // so if we delivered to the application proposal with decisions i, + // then we are expecting to be proposed a proposal with decisions i+1, + // unless this is the genesis block, or after a view change + newDec = dec + 1 + if seq == 0 { + newDec = 0 + } + viewChange, err := c.state.LoadViewChangeIfApplicable() + if err != nil { + c.Logger.Panicf("Failed loading view change, error: %v", err) + } + if viewChange == nil { + c.Logger.Debugf("No view change to restore") + } else { + // Check if the view change has a newer view + if viewChange.NextView >= view { + c.Logger.Debugf("Restoring from view change with view %d", viewChange.NextView) + newView = viewChange.NextView + restoreChan := make(chan struct{}, 1) + restoreChan <- struct{}{} + c.viewChanger.Restore = restoreChan + } + } + + viewSeq, err := c.state.LoadNewViewIfApplicable() + if err != nil { + c.Logger.Panicf("Failed loading new view, error: %v", err) + } + if viewSeq == nil { + c.Logger.Debugf("No new view to restore") + 
} else { + // Check if metadata should be taken from the restored new view + if viewSeq.Seq >= seq { + c.Logger.Debugf("Restoring from new view with view %d and seq %d", viewSeq.View, viewSeq.Seq) + newView = viewSeq.View + newSeq = viewSeq.Seq + newDec = 0 + } + } + return newView, newSeq, newDec +} + +func (c *Consensus) waitForEachOther() { + c.viewChanger.ControllerStartedWG = sync.WaitGroup{} + c.viewChanger.ControllerStartedWG.Add(1) + c.controller.StartedWG = &c.viewChanger.ControllerStartedWG +} + +func (c *Consensus) startComponents(view, seq, dec uint64, configSync bool) { + // If we delivered to the application proposal with sequence i, + // then we are expecting to be proposed a proposal with sequence i+1. + c.collector.Start() + c.viewChanger.Start(view) + if configSync { + c.controller.Start(view, seq+1, dec, c.Config.SyncOnStart) + } else { + c.controller.Start(view, seq+1, dec, false) + } +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/pkg/types/config.go b/vendor/github.com/SmartBFT-Go/consensus/pkg/types/config.go new file mode 100644 index 00000000000..b823e093f3a --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/pkg/types/config.go @@ -0,0 +1,171 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package types + +import ( + "time" + + "github.com/pkg/errors" +) + +// Configuration defines the parameters needed in order to create an instance of Consensus. +type Configuration struct { + // SelfID is the identifier of the node. + SelfID uint64 + + // RequestBatchMaxCount is the maximal number of requests in a batch. + // A request batch that reaches this count is proposed immediately. + RequestBatchMaxCount uint64 + // RequestBatchMaxBytes is the maximal total size of requests in a batch, in bytes. + // This is also the maximal size of a request. A request batch that reaches this size is proposed immediately. 
+ RequestBatchMaxBytes uint64
+ // RequestBatchMaxInterval is the maximal time interval a request batch is waiting before it is proposed.
+ // A request batch is accumulating requests until RequestBatchMaxInterval has elapsed from the time the batch was
+ // first created (i.e. the time the first request was added to it), or until it is of count RequestBatchMaxCount,
+ // or total size RequestBatchMaxBytes, whichever happens first.
+ RequestBatchMaxInterval time.Duration
+
+ // IncomingMessageBufferSize is the size of the buffer holding incoming messages before they are processed.
+ IncomingMessageBufferSize uint64
+ // RequestPoolSize is the number of pending requests retained by the node.
+ // The RequestPoolSize is recommended to be at least double (x2) the RequestBatchMaxCount.
+ RequestPoolSize uint64
+
+ // RequestForwardTimeout is started from the moment a request is submitted, and defines the interval after which a
+ // request is forwarded to the leader.
+ RequestForwardTimeout time.Duration
+ // RequestComplainTimeout is started when RequestForwardTimeout expires, and defines the interval after which the
+ // node complains about the view leader.
+ RequestComplainTimeout time.Duration
+ // RequestAutoRemoveTimeout is started when RequestComplainTimeout expires, and defines the interval after which
+ // a request is removed (dropped) from the request pool.
+ RequestAutoRemoveTimeout time.Duration
+
+ // ViewChangeResendInterval defines the interval in which the ViewChange message is resent.
+ ViewChangeResendInterval time.Duration
+ // ViewChangeTimeout is started when a node first receives a quorum of ViewChange messages, and defines the
+ // interval after which the node will try to initiate a view change with a higher view number.
+ ViewChangeTimeout time.Duration + + // LeaderHeartbeatTimeout is the interval after which, if nodes do not receive a "sign of life" from the leader, + // they complain on the current leader and try to initiate a view change. A sign of life is either a heartbeat + // or a message from the leader. + LeaderHeartbeatTimeout time.Duration + // LeaderHeartbeatCount is the number of heartbeats per LeaderHeartbeatTimeout that the leader should emit. + // The heartbeat-interval is equal to: LeaderHeartbeatTimeout/LeaderHeartbeatCount. + LeaderHeartbeatCount uint64 + // NumOfTicksBehindBeforeSyncing is the number of follower ticks where the follower is behind the leader + // by one sequence before starting a sync + NumOfTicksBehindBeforeSyncing uint64 + + // CollectTimeout is the interval after which the node stops listening to StateTransferResponse messages, + // stops collecting information about view metadata from remote nodes. + CollectTimeout time.Duration + + // SyncOnStart is a flag indicating whether a sync is required on startup. + SyncOnStart bool + + // SpeedUpViewChange is a flag indicating whether a node waits for only f+1 view change messages to join + // the view change (hence speeds up the view change process), or it waits for a quorum before joining. + // Waiting only for f+1 is considered less safe. + SpeedUpViewChange bool + + // LeaderRotation is a flag indicating whether leader rotation is active. + LeaderRotation bool + // DecisionsPerLeader is the number of decisions reached by a leader before there is a leader rotation. + DecisionsPerLeader uint64 +} + +// DefaultConfig contains reasonable values for a small cluster that resides on the same geography (or "Region"), but +// possibly on different availability zones within the geography. It is assumed that the typical latency between nodes, +// and between clients to nodes, is approximately 10ms. +// Set the SelfID. 
+var DefaultConfig = Configuration{ + RequestBatchMaxCount: 100, + RequestBatchMaxBytes: 10 * 1024 * 1024, + RequestBatchMaxInterval: 50 * time.Millisecond, + IncomingMessageBufferSize: 200, + RequestPoolSize: 400, + RequestForwardTimeout: 2 * time.Second, + RequestComplainTimeout: 20 * time.Second, + RequestAutoRemoveTimeout: 3 * time.Minute, + ViewChangeResendInterval: 5 * time.Second, + ViewChangeTimeout: 20 * time.Second, + LeaderHeartbeatTimeout: time.Minute, + LeaderHeartbeatCount: 10, + NumOfTicksBehindBeforeSyncing: 10, + CollectTimeout: time.Second, + SyncOnStart: false, + SpeedUpViewChange: false, + LeaderRotation: true, + DecisionsPerLeader: 3, +} + +func (c Configuration) Validate() error { + if !(c.SelfID > 0) { + return errors.Errorf("SelfID is lower than or equal to zero") + } + + if !(c.RequestBatchMaxCount > 0) { + return errors.Errorf("RequestBatchMaxCount should be greater than zero") + } + if !(c.RequestBatchMaxBytes > 0) { + return errors.Errorf("RequestBatchMaxBytes should be greater than zero") + } + if !(c.RequestBatchMaxInterval > 0) { + return errors.Errorf("RequestBatchMaxInterval should be greater than zero") + } + if !(c.IncomingMessageBufferSize > 0) { + return errors.Errorf("IncomingMessageBufferSize should be greater than zero") + } + if !(c.RequestPoolSize > 0) { + return errors.Errorf("RequestPoolSize should be greater than zero") + } + if !(c.RequestForwardTimeout > 0) { + return errors.Errorf("RequestForwardTimeout should be greater than zero") + } + if !(c.RequestComplainTimeout > 0) { + return errors.Errorf("RequestComplainTimeout should be greater than zero") + } + if !(c.RequestAutoRemoveTimeout > 0) { + return errors.Errorf("RequestAutoRemoveTimeout should be greater than zero") + } + if !(c.ViewChangeResendInterval > 0) { + return errors.Errorf("ViewChangeResendInterval should be greater than zero") + } + if !(c.ViewChangeTimeout > 0) { + return errors.Errorf("ViewChangeTimeout should be greater than zero") + } + if 
!(c.LeaderHeartbeatTimeout > 0) { + return errors.Errorf("LeaderHeartbeatTimeout should be greater than zero") + } + if !(c.LeaderHeartbeatCount > 0) { + return errors.Errorf("LeaderHeartbeatCount should be greater than zero") + } + if !(c.NumOfTicksBehindBeforeSyncing > 0) { + return errors.Errorf("NumOfTicksBehindBeforeSyncing should be greater than zero") + } + if !(c.CollectTimeout > 0) { + return errors.Errorf("CollectTimeout should be greater than zero") + } + if c.RequestBatchMaxCount > c.RequestBatchMaxBytes { + return errors.Errorf("RequestBatchMaxCount is bigger than RequestBatchMaxBytes") + } + if c.RequestForwardTimeout > c.RequestComplainTimeout { + return errors.Errorf("RequestForwardTimeout is bigger than RequestComplainTimeout") + } + if c.RequestComplainTimeout > c.RequestAutoRemoveTimeout { + return errors.Errorf("RequestComplainTimeout is bigger than RequestAutoRemoveTimeout") + } + if c.ViewChangeResendInterval > c.ViewChangeTimeout { + return errors.Errorf("ViewChangeResendInterval is bigger than ViewChangeTimeout") + } + if c.LeaderRotation && c.DecisionsPerLeader <= 0 { + return errors.Errorf("DecisionsPerLeader should be greater than zero when leader rotation is active") + } + + return nil +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/pkg/types/types.go b/vendor/github.com/SmartBFT-Go/consensus/pkg/types/types.go new file mode 100644 index 00000000000..baab56aa5a5 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/pkg/types/types.go @@ -0,0 +1,123 @@ +// Copyright IBM Corp. All Rights Reserved. 
//
// SPDX-License-Identifier: Apache-2.0
//

package types

import (
	"crypto/sha256"
	"encoding/asn1"
	"encoding/hex"
	"fmt"
	"sync"

	"github.com/SmartBFT-Go/consensus/smartbftprotos"
)

// Proposal is a consensus proposal: the application payload together with
// the consensus header and metadata.
type Proposal struct {
	Payload              []byte
	Header               []byte
	Metadata             []byte
	VerificationSequence int64 // int64 for asn1 marshaling
}

// Signature is a signature by a single node (ID) over Msg.
type Signature struct {
	ID    uint64
	Value []byte
	Msg   []byte
}

// Decision is a proposal together with the signatures that committed it.
type Decision struct {
	Proposal   Proposal
	Signatures []Signature
}

// ViewAndSeq identifies a position (view and sequence) in the protocol.
type ViewAndSeq struct {
	View uint64
	Seq  uint64
}

// RequestInfo uniquely identifies a client request.
type RequestInfo struct {
	ClientID string
	ID       string
}

// String renders the request identifier as "clientID:ID".
func (r *RequestInfo) String() string {
	return r.ClientID + ":" + r.ID
}

// Digest returns a hex-encoded SHA-256 digest over the ASN.1 encoding of the
// proposal. A fresh Proposal literal is marshaled so field order is fixed by
// the struct definition. Panics if ASN.1 marshaling fails.
func (p Proposal) Digest() string {
	rawBytes, err := asn1.Marshal(Proposal{
		VerificationSequence: p.VerificationSequence,
		Metadata:             p.Metadata,
		Payload:              p.Payload,
		Header:               p.Header,
	})

	if err != nil {
		panic(fmt.Sprintf("failed marshaling proposal: %v", err))
	}

	return computeDigest(rawBytes)
}

// computeDigest returns the hex-encoded SHA-256 of rawBytes.
func computeDigest(rawBytes []byte) string {
	h := sha256.New()
	h.Write(rawBytes)
	digest := h.Sum(nil)
	return hex.EncodeToString(digest)
}

// Checkpoint holds the latest proposal and its signatures, guarded by a
// read-write lock for concurrent access.
type Checkpoint struct {
	lock       sync.RWMutex
	proposal   Proposal
	signatures []Signature
}

// Get returns a protobuf copy of the checkpointed proposal and signatures.
func (c *Checkpoint) Get() (smartbftprotos.Proposal, []*smartbftprotos.Signature) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	p := smartbftprotos.Proposal{
		Header:               c.proposal.Header,
		Payload:              c.proposal.Payload,
		Metadata:             c.proposal.Metadata,
		VerificationSequence: uint64(c.proposal.VerificationSequence),
	}

	var signatures []*smartbftprotos.Signature
	for _, sig := range c.signatures {
		signatures = append(signatures, &smartbftprotos.Signature{
			Msg:    sig.Msg,
			Value:  sig.Value,
			Signer: sig.ID,
		})
	}
	return p, signatures
}

// Set atomically replaces the checkpointed proposal and signatures.
func (c *Checkpoint) Set(proposal Proposal, signatures []Signature) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.proposal = proposal
	c.signatures = signatures
}

// Reconfig describes a reconfiguration carried by the latest decision.
type Reconfig struct {
	InLatestDecision bool
	CurrentNodes     []uint64
	CurrentConfig    Configuration
}

// SyncResponse is the result of synchronizing with the cluster.
type SyncResponse struct {
	Latest   Decision
	Reconfig ReconfigSync
}

// ReconfigSync describes a reconfiguration observed in replicated decisions.
type ReconfigSync struct {
	InReplicatedDecisions bool
	CurrentNodes          []uint64
	CurrentConfig         Configuration
}

// ---- vendor/github.com/SmartBFT-Go/consensus/pkg/wal/reader.go (new file) ----

// Copyright IBM Corp. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//

package wal

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"os"

	"github.com/SmartBFT-Go/consensus/pkg/api"
	protos "github.com/SmartBFT-Go/consensus/smartbftprotos"
	"github.com/golang/protobuf/proto"
)

// LogRecordReader reads LogRecords sequentially from a single WAL file,
// maintaining the running CRC across records.
type LogRecordReader struct {
	fileName string
	logger   api.Logger
	logFile  *os.File
	crc      uint32
}

// NewLogRecordReader opens the named WAL file and reads its first record,
// which must be a CRC-Anchor; the anchor's CRC seeds the reader's running
// CRC. On any failure the underlying file is closed before returning.
func NewLogRecordReader(logger api.Logger, fileName string) (*LogRecordReader, error) {
	if logger == nil {
		return nil, errors.New("logger is nil")
	}

	r := &LogRecordReader{
		fileName: fileName,
		logger:   logger,
	}

	var err error
	r.logFile, err = os.Open(fileName)
	if err != nil {
		return nil, err
	}

	_, err = r.logFile.Seek(0, io.SeekStart)
	if err != nil {
		_ = r.Close()
		return nil, err
	}

	// read the CRC-Anchor, the first record of every file
	recLen, crc, err := r.readHeader()
	if err != nil {
		_ = r.Close()
		return nil, err
	}
	padSize := getPadSize(int(recLen))
	payload, err := r.readPayload(int(recLen) + padSize)
	if err != nil {
		_ = r.Close()
		return nil, err
	}
	var record = &protos.LogRecord{}
	err = proto.Unmarshal(payload[:recLen], record)
	if err != nil {
		_ = r.Close()
		return nil, err
	}

	if record.Type != protos.LogRecord_CRC_ANCHOR {
		_ = r.Close()
		return nil, fmt.Errorf("failed reading CRC-Anchor from log file: %s", fileName)
	}
	r.crc = crc

	r.logger.Debugf("Initialized reader: CRC-Anchor: %08X, file: %s", r.crc, r.fileName)

	return r, nil
}

// Close closes the underlying file (if open) and nils out the reader's
// references; the last CRC value is kept readable via the crc field.
func (r *LogRecordReader) Close() error {
	var err error
	if r.logFile != nil {
		err = r.logFile.Close()
	}

	r.logger.Debugf("Closed reader: CRC: %08X, file: %s", r.crc, r.fileName)

	r.logger = nil
	r.logFile = nil

	return err
}

// CRC returns the running CRC after the last successfully read record.
func (r *LogRecordReader) CRC() uint32 {
	return r.crc
}

// Read reads the next record: header, then padded payload. For ENTRY and
// CONTROL records the stored CRC is verified against the running CRC chain;
// CRC_ANCHOR records re-seed the chain without verification.
func (r *LogRecordReader) Read() (*protos.LogRecord, error) {
	recLen, crc, err := r.readHeader()
	if err != nil {
		return nil, err
	}
	padSize := getPadSize(int(recLen))
	payload, err := r.readPayload(int(recLen) + padSize)
	if err != nil {
		return nil, err
	}
	var record = &protos.LogRecord{}
	err = proto.Unmarshal(payload[:recLen], record)
	if err != nil {
		return nil, fmt.Errorf("wal: failed to unmarshal payload: %s", err)
	}

	switch record.Type {
	case protos.LogRecord_ENTRY, protos.LogRecord_CONTROL:
		if !verifyCRC(r.crc, crc, payload) {
			return nil, ErrCRC
		}
		fallthrough
	case protos.LogRecord_CRC_ANCHOR:
		r.crc = crc
	default:
		return nil, fmt.Errorf("wal: unexpected LogRecord_Type: %v", record.Type)
	}

	return record, nil
}

// readHeader attempts to read the 8 byte header.
// If it fails, it fails like io.ReadFull().
func (r *LogRecordReader) readHeader() (length, crc uint32, err error) {
	buff := make([]byte, recordHeaderSize)
	n, err := io.ReadFull(r.logFile, buff)
	if err != nil {
		r.logger.Debugf("Failed to read header in full: expected=%d, actual=%d; error: %s", recordHeaderSize, n, err)
		return 0, 0, err
	}

	// Header layout: length in the low 32 bits, CRC in the high 32 bits.
	header := binary.LittleEndian.Uint64(buff)
	length = uint32(header & recordLengthMask)
	crc = uint32((header & recordCRCMask) >> 32)

	return length, crc, nil
}

// readPayload attempts to read a payload in full.
// If it fails, it fails like io.ReadFull().
// NOTE(review): the parameter shadows the builtin `len` — works, but worth renaming upstream.
func (r *LogRecordReader) readPayload(len int) (payload []byte, err error) {
	buff := make([]byte, len)
	n, err := io.ReadFull(r.logFile, buff)
	if err != nil {
		r.logger.Debugf("Failed to read payload in full: expected=%d, actual=%d; error: %s", len, n, err)
		return nil, err
	}

	return buff, nil
}

// verifyCRC checks that extending prevCRC with data yields expectedCRC.
func verifyCRC(prevCRC, expectedCRC uint32, data []byte) bool {
	dataCRC := crc32.Update(prevCRC, crcTable, data)
	return dataCRC == expectedCRC
}

// ---- vendor/github.com/SmartBFT-Go/consensus/pkg/wal/util.go (new file) ----

// Copyright IBM Corp. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//

package wal

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/SmartBFT-Go/consensus/pkg/api"
)

// padTable caches zero-byte pads of length 0..7, indexed by pad size,
// so Append never allocates pad bytes on the hot path.
var padTable [][]byte

func init() {
	padTable = make([][]byte, 8)
	for i := 0; i < 8; i++ {
		padTable[i] = make([]byte, i)
	}
}

// dirEmpty reports whether dirPath contains no WAL files.
// An unreadable directory is treated as empty.
func dirEmpty(dirPath string) bool {
	names, err := dirReadWalNames(dirPath)
	if err != nil {
		return true
	}

	return len(names) == 0
}

// dirCreate opens dirPath, creating it (with private RWX permissions)
// if it does not yet exist.
func dirCreate(dirPath string) error {
	dirFile, err := os.Open(dirPath)
	if err != nil {
		if os.IsNotExist(err) {
			err = os.MkdirAll(dirPath, walDirPermPrivateRWX)
		}
		return err
	}
	defer dirFile.Close()
	return err
}

// dirReadWalNames finds file names that follow the wal file name template.
func dirReadWalNames(dirPath string) ([]string, error) {
	dirFile, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}
	defer dirFile.Close()

	names, err := dirFile.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	walNames := make([]string, 0)
	for _, name := range names {
		if strings.HasSuffix(name, walFileSuffix) {
			var index uint64
			// Only keep names whose stem parses as a hex index per the template.
			n, err := fmt.Sscanf(name, walFileTemplate, &index)
			if n != 1 || err != nil {
				continue
			}
			walNames = append(walNames, name)
		}
	}

	sort.Strings(walNames)

	return walNames, nil
}

// checkWalFiles checks for a continuous index sequence and a readable CRC-Anchor in each file.
// If the last file cannot be read, it may be ignored (or repaired).
func checkWalFiles(logger api.Logger, dirName string, walNames []string) ([]uint64, error) {
	sort.Strings(walNames)
	var indexes = make([]uint64, 0)
	for i, name := range walNames {
		index, err := parseWalFileName(name)
		if err != nil {
			logger.Errorf("wal: failed to parse file name: %s; error: %s", name, err)
			return nil, err
		}
		indexes = append(indexes, index)

		// verify we have CRC-Anchor.
		r, err := NewLogRecordReader(logger, filepath.Join(dirName, walNames[i]))
		if err != nil {
			// check if it is the last file and return a special error that allows a repair.
			if i == len(walNames)-1 {
				logger.Errorf("wal: failed to create reader for last file: %s; error: %s; this may possibly be repaired.", name, err)
				return nil, io.ErrUnexpectedEOF
			}
			return nil, fmt.Errorf("wal: failed to create reader for file: %s; error: %s", name, err)
		}
		err = r.Close()
		if err != nil {
			return nil, fmt.Errorf("wal: failed to close reader for file: %s; error: %s", name, err)
		}

		// verify no gaps
		if i == 0 {
			continue
		}
		if index != (indexes[i-1] + 1) {
			return nil, errors.New("wal: files not in sequence")
		}
	}

	sort.Slice(indexes,
		func(i, j int) bool {
			return indexes[i] < indexes[j]
		},
	)

	return indexes, nil
}

// getPadSize returns the number of zero bytes needed to pad recordLength up
// to the next 8-byte boundary (0..7).
func getPadSize(recordLength int) int {
	return (8 - recordLength%8) % 8
}

// getPadBytes returns the pad size and a pre-allocated zero pad of that size.
func getPadBytes(recordLength int) (int, []byte) {
	i := getPadSize(recordLength)
	return i, padTable[i]
}

// parseWalFileName extracts the uint64 index encoded in a WAL file name.
func parseWalFileName(fileName string) (index uint64, err error) {
	n, err := fmt.Sscanf(fileName, walFileTemplate, &index)
	if n != 1 || err != nil {
		return 0, fmt.Errorf("failed to parse wal file name: %s; error: %s", fileName, err)
	}
	return index, nil
}

// renameResetWalFile reset anchor on a temporary file, and then rename to next file name.
// NOTE(review): on the Seek/Write error paths tmpF is never closed — file
// handle leak; consider a deferred close upstream.
func renameResetWalFile(recycleFile, nextFile string) (err error) {
	tmpFilePath := recycleFile + ".tmp"
	if err = os.Rename(recycleFile, tmpFilePath); err != nil {
		return err
	}
	tmpF, err := os.OpenFile(tmpFilePath, os.O_RDWR, walFilePermPrivateRW)
	if err != nil {
		return err
	}
	if _, err = tmpF.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if _, err = tmpF.Write(make([]byte, 1024)); err != nil { // overwrite the CRC-Anchor
		return err
	}
	if err = tmpF.Close(); err != nil {
		return err
	}
	if err = os.Rename(tmpFilePath, nextFile); err != nil {
		return err
	}

	return nil
}

// copyFile copies source to target in one read/write.
// NOTE(review): target is created with 0644, looser than walFilePermPrivateRW
// (0600) used everywhere else — confirm this is intentional for the ".copy"
// diagnostic file.
func copyFile(source, target string) error {
	input, err := ioutil.ReadFile(source)
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(target, input, 0644)
	return err
}

// truncateCloseFile truncates f at offset, syncs it to disk, and closes it.
func truncateCloseFile(f *os.File, offset int64) error {
	if err := f.Truncate(offset); err != nil {
		return fmt.Errorf("Failed to truncate at: %d; log file: %s; error: %s", offset, f.Name(), err)
	}

	if err := f.Sync(); err != nil {
		return fmt.Errorf("Failed to sync log file: %s; error: %s", f.Name(), err)

	}

	if err := f.Close(); err != nil {
		return fmt.Errorf("Failed to close log file: %s; error: %s", f.Name(), err)
	}

	return nil
}

// scanVerifyFiles reads every record of every file, verifying that each
// file's CRC-Anchor matches the final CRC of the previous file.
func scanVerifyFiles(logger api.Logger, dirPath string, files []string) error {
	var crc uint32
	var num, numTotal int
	for i, name := range files {
		fullName := filepath.Join(dirPath, name)
		r, err := NewLogRecordReader(logger, fullName)
		if err != nil {
			return err
		}

		if num, err = scanVerify1(logger, r, i, crc); err != nil {
			return err
		}

		numTotal += num
		crc = r.crc // final CRC of file
	}

	logger.Debugf("Scanned %d records in %d files", numTotal, len(files))
	return nil
}

// scanVerify1 scans a single file to EOF, checking (for all but the first
// file, i > 0) that its anchor CRC equals the previous file's final CRC.
// Returns the number of records read past the anchor.
func scanVerify1(logger api.Logger, r *LogRecordReader, i int, crc uint32) (num int, err error) {
	defer func() { _ = r.Close() }()

	if i > 0 && crc != r.crc {
		logger.Errorf("Anchor-CRC %08X of file: %s, does not match previous: %08X", r.crc, r.logFile.Name(), crc)
		return 0, ErrCRC
	}

	var readErr error
	for readErr == nil {
		num++
		_, readErr = r.Read()
	}
	// num was incremented once for the failed read, hence num-1.
	if readErr != io.EOF {
		return num - 1, readErr
	}

	return num - 1, nil
}

// scanRepairFile scans the file to the last good record and truncates after it. If even the CRC-Anchor cannot be
// read, the file is deleted.
func scanRepairFile(logger api.Logger, lastFile string) error {
	logger.Debugf("Trying to repair file: %s", lastFile)

	r, err := NewLogRecordReader(logger, lastFile)
	if err != nil {
		logger.Warnf("Write-Ahead-Log could not open the last file, due to error: %s", err)
		if err = os.Remove(lastFile); err != nil {
			return err
		}
		logger.Warnf("Write-Ahead-Log DELETED the last file (a copy was saved): %s", lastFile)
		return nil
	}

	// scan to the last good record
	offset, err := r.logFile.Seek(0, io.SeekCurrent)
	if err != nil {
		_ = r.Close()
		return err
	}

	num := 0
	for {
		_, readErr := r.Read()
		if readErr != nil {
			logger.Debugf("Read error: %s", readErr)

			if closeErr := r.Close(); closeErr != nil {
				return closeErr
			}

			if readErr == io.EOF {
				logger.Debugf("Read %d good records till EOF, no need to repair", num)
				return nil // no need to repair
			}
			break
		}
		// keep the end offset of a good record
		num++
		offset, err = r.logFile.Seek(0, io.SeekCurrent)
		if err != nil {
			return err
		}
	}

	logger.Debugf("Read %d good records, last good record ended at offset: %d", num, offset)

	f, err := os.OpenFile(lastFile, os.O_RDWR, walFilePermPrivateRW)
	if err != nil {
		logger.Errorf("Failed to open log file: %s; error: %s", lastFile, err)
		return err
	}

	if err = truncateCloseFile(f, offset); err != nil {
		logger.Errorf("Failed to truncateCloseFile: error: %s", err)
		return err
	}

	return nil
}
b/vendor/github.com/SmartBFT-Go/consensus/pkg/wal/writeaheadlog.go new file mode 100644 index 00000000000..f659e402c70 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/pkg/wal/writeaheadlog.go @@ -0,0 +1,706 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +package wal + +import ( + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/SmartBFT-Go/consensus/pkg/api" + protos "github.com/SmartBFT-Go/consensus/smartbftprotos" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" +) + +const ( + walFileSuffix string = ".wal" + walFileTemplate = "%016x" + walFileSuffix + + walFilePermPrivateRW os.FileMode = 0600 + walDirPermPrivateRWX os.FileMode = 0700 + + recordHeaderSize int = 8 + recordLengthMask uint64 = 0x00000000FFFFFFFF + recordCRCMask uint64 = recordLengthMask << 32 + + walCRCSeed uint32 = 0xDEED0001 + + FileSizeBytesDefault int64 = 64 * 1024 * 1024 // 64MB + BufferSizeBytesDefault int64 = 1024 * 1024 // 1MB +) + +var ( + ErrCRC = errors.New("wal: crc verification failed") + ErrWriteOnly = errors.New("wal: in WRITE mode") + ErrReadOnly = errors.New("wal: in READ mode") + + ErrWALAlreadyExists = errors.New("wal: is already exists") + + crcTable = crc32.MakeTable(crc32.Castagnoli) +) + +type LogRecordLength uint32 +type LogRecordCRC uint32 + +// LogRecordHeader contains the LogRecordLength (lower 32 bits) and LogRecordCRC (upper 32 bits). +type LogRecordHeader uint64 + +// WriteAheadLogFile is a simple implementation of a write ahead log (WAL). +// +// The WAL is composed of a sequence of frames. Each frame contains: +// - a header (uint64) +// - payload: a record of type LogRecord, marshaled to bytes, and padded with zeros to 8B boundary. +// +// The 64 bit header is made of two parts: +// - length of the marshaled LogRecord (not including pad bytes), in the lower 32 bits. 
+// - a crc32 of the data: marshaled record bytes + pad bytes, in the upper 32 bits. +// +// The WAL is written to a sequence of files: .wal, where index uint64=1,2,3...; represented in fixed-width +// hex format, e.g. 0000000000000001.wal +// +// The WAL has two modes: append, and read. +// +// When a WAL is first created, it is in append mode. +// When an existing WAL is opened, it is in read mode, and will change to append mode only after ReadAll() is invoked. +// +// In append mode the WAL can accept Append() and TruncateTo() calls. +// The WAL must be closed after use to release all resources. +// +type WriteAheadLogFile struct { + dirName string + options *Options + + logger api.Logger + + mutex sync.Mutex + dirFile *os.File + index uint64 + logFile *os.File + headerBuff []byte + dataBuff *proto.Buffer + crc uint32 + readMode bool + truncateIndex uint64 + activeIndexes []uint64 +} + +type Options struct { + FileSizeBytes int64 + BufferSizeBytes int64 +} + +// DefaultOptions returns the set of default options. +func DefaultOptions() *Options { + return &Options{ + FileSizeBytes: FileSizeBytesDefault, + BufferSizeBytes: BufferSizeBytesDefault, + } +} + +func (o *Options) String() string { + return fmt.Sprintf("{FileSizeBytes: %d, BufferSizeBytes: %d}", o.FileSizeBytes, o.BufferSizeBytes) +} + +// Create will create a new WAL, if it does not exist, or an error if it already exists. +// +// logger: reference to a Logger implementation. +// dirPath: directory path of the WAL. +// options: a structure containing Options, or nil, for default options. 
+// +// return: pointer to a WAL, ErrWALAlreadyExists if WAL already exists or other errors +func Create(logger api.Logger, dirPath string, options *Options) (*WriteAheadLogFile, error) { + if logger == nil { + return nil, errors.New("wal: logger is nil") + } + + if !dirEmpty(dirPath) { + return nil, ErrWALAlreadyExists + } + opt := DefaultOptions() + if options != nil { + opt = options + } + + // TODO BACKLOG: create the directory & file atomically by creation in a temp dir and renaming + cleanDirName := filepath.Clean(dirPath) + err := dirCreate(cleanDirName) + if err != nil { + return nil, fmt.Errorf("wal: could not create directory: %s; error: %s", dirPath, err) + } + + wal := &WriteAheadLogFile{ + dirName: cleanDirName, + options: opt, + logger: logger, + index: 1, + headerBuff: make([]byte, 8), + dataBuff: proto.NewBuffer(make([]byte, opt.BufferSizeBytes)), + crc: walCRCSeed, + truncateIndex: 1, + activeIndexes: []uint64{1}, + } + + wal.dirFile, err = os.Open(cleanDirName) + if err != nil { + wal.Close() + return nil, fmt.Errorf("wal: could not open directory: %s; error: %s", dirPath, err) + } + + fileName := fmt.Sprintf(walFileTemplate, uint64(1)) + wal.logFile, err = os.OpenFile(filepath.Join(cleanDirName, fileName), os.O_CREATE|os.O_WRONLY, walFilePermPrivateRW) + if err != nil { + wal.Close() + return nil, fmt.Errorf("wal: could not open file: %s; error: %s", fileName, err) + } + + if err = wal.saveCRC(); err != nil { + wal.Close() + return nil, err + } + + wal.logger.Infof("Write-Ahead-Log created successfully, mode: WRITE, dir: %s", wal.dirName) + return wal, nil +} + +// Open will open an existing WAL, if it exists, or an error if it does not exist. +// +// After opening, the WAL is in read mode, and expects a call to ReadAll(). An attempt to write +// (e.g. Append, TruncateTo) will result in an error. +// +// logger: reference to a Logger implementation. +// dirPath: directory path of the WAL. 
+// options: a structure containing Options, or nil, for default options. +// +// return: pointer to a WAL, or an error +func Open(logger api.Logger, dirPath string, options *Options) (*WriteAheadLogFile, error) { + if logger == nil { + return nil, errors.New("wal: logger is nil") + } + + walNames, err := dirReadWalNames(dirPath) + if err != nil { + return nil, err + } + if len(walNames) == 0 { + return nil, os.ErrNotExist + } + + logger.Infof("Write-Ahead-Log discovered %d wal files: %s", len(walNames), strings.Join(walNames, ", ")) + + opt := DefaultOptions() + if options != nil { + opt = options + } + + cleanDirName := filepath.Clean(dirPath) + + wal := &WriteAheadLogFile{ + dirName: cleanDirName, + options: opt, + logger: logger, + headerBuff: make([]byte, 8), + dataBuff: proto.NewBuffer(make([]byte, opt.BufferSizeBytes)), + readMode: true, + } + + wal.dirFile, err = os.Open(cleanDirName) + if err != nil { + _ = wal.Close() + return nil, fmt.Errorf("wal: could not open directory: %s; error: %s", dirPath, err) + } + + // After the check we have an increasing, continuous sequence, with valid CRC-Anchors in each file. + wal.activeIndexes, err = checkWalFiles(logger, dirPath, walNames) + if err != nil { + _ = wal.Close() + return nil, err + } + + fileName := walNames[0] // first valid file + wal.index, err = parseWalFileName(fileName) + if err != nil { + _ = wal.Close() + return nil, err + } + + wal.logger.Infof("Write-Ahead-Log opened successfully, mode: READ, dir: %s", wal.dirName) + + return wal, nil +} + +// Repair tries to repair the last file of a WAL, in case an Open() comes back with a io.ErrUnexpectedEOF, +// which indicates the possibility to fix the WAL. +// +// After a crash, the last log file in the WAL may be left in a state where an attempt tp reopen will result in +// the last Read() returning an error. 
This is because of several reasons: +// - we use pre-allocated / recycled files, the files have a "garbage" tail +// - the last write may have been torn +// - the failure might have occurred when the log file was being prepared (no anchor) +// +// The Repair() tries to repair the last file of the wal by truncating after the last good record. +// Before doing so it copies the bad file to a side location for later analysis by operators. +// +// logger: reference to a Logger implementation. +// dirPath: directory path of the WAL. +// return: an error if repair was not successful. +func Repair(logger api.Logger, dirPath string) error { + cleanDirPath := filepath.Clean(dirPath) + walNames, err := dirReadWalNames(cleanDirPath) + if err != nil { + return err + } + if len(walNames) == 0 { + return os.ErrNotExist + } + logger.Infof("Write-Ahead-Log discovered %d wal files: %s", len(walNames), strings.Join(walNames, ", ")) + + // verify that all but the last are fine + if err = scanVerifyFiles(logger, cleanDirPath, walNames[:len(walNames)-1]); err != nil { + logger.Errorf("Write-Ahead-Log failed to repair, additional files are faulty: %s", err) + return err + } + + lastFile := filepath.Join(cleanDirPath, walNames[len(walNames)-1]) + logger.Infof("Write-Ahead-Log is going to try and repair the last file: %s", lastFile) + lastFileCopy := lastFile + ".copy" + err = copyFile(lastFile, lastFileCopy) + if err != nil { + logger.Errorf("Write-Ahead-Log failed to repair, could not make a copy: %s", err) + return err + } + logger.Infof("Write-Ahead-Log made a copy of the last file: %s", lastFileCopy) + + err = scanRepairFile(logger, lastFile) + if err != nil { + logger.Errorf("Write-Ahead-Log failed to scan and repair last file: %s", err) + return err + } + + logger.Infof("Write-Ahead-Log successfully repaired the last file: %s", lastFile) + return nil +} + +// Close the files and directory of the WAL, and release all resources. 
+func (w *WriteAheadLogFile) Close() error { + var errF, errD error + + w.mutex.Lock() + defer w.mutex.Unlock() + + if w.logFile != nil { + if errF = w.truncateAndCloseLogFile(); errF != nil { + w.logger.Errorf("failed to properly close log file %s; error: %s", w.logFile.Name(), errF) + } + w.logFile = nil + } + + w.dataBuff = nil + w.headerBuff = nil + + if w.dirFile != nil { + if errD = w.dirFile.Close(); errD != nil { + w.logger.Errorf("failed to properly close directory %s; error: %s", w.dirName, errD) + } + w.dirFile = nil + } + + // return the first error + switch { + case errF != nil: + return errF + default: + return errD + } +} + +// CRC returns the last CRC written to the log file. +func (w *WriteAheadLogFile) CRC() uint32 { + w.mutex.Lock() + defer w.mutex.Unlock() + + return w.crc +} + +// TruncateTo appends a control record in which the TruncateTo flag is true. +// This marks that every record prior to this one can be safely truncated from the log. +func (w *WriteAheadLogFile) TruncateTo() error { + record := &protos.LogRecord{ + Type: protos.LogRecord_CONTROL, + TruncateTo: true, + } + + w.mutex.Lock() + defer w.mutex.Unlock() + + return w.append(record) +} + +// Append a data item to the end of the WAL and indicate whether this entry is a truncation point. +// +// The data item will be added to the log, and internally marked with a flag that indicates whether +// it is a truncation point. The log implementation may truncate all preceding data items, not including this one. +// +// data: the data to be appended to the log. Cannot be nil or empty. +// truncateTo: whether all records preceding this one, but not including it, can be truncated from the log. 
+func (w *WriteAheadLogFile) Append(data []byte, truncateTo bool) error { + if len(data) == 0 { + return errors.New("data is nil or empty") + } + + record := &protos.LogRecord{ + Type: protos.LogRecord_ENTRY, + TruncateTo: truncateTo, + Data: data, + } + + w.mutex.Lock() + defer w.mutex.Unlock() + + return w.append(record) +} + +func (w *WriteAheadLogFile) append(record *protos.LogRecord) error { + if w.dirFile == nil { + return os.ErrClosed + } + if w.readMode { + return ErrReadOnly + } + + w.dataBuff.Reset() + err := w.dataBuff.Marshal(record) + if err != nil { + return fmt.Errorf("wal: failed to marshal to data buffer: %s", err) + } + + payloadBuff := w.dataBuff.Bytes() + recordLength := len(payloadBuff) + if (uint64(recordLength) & recordCRCMask) != 0 { + return fmt.Errorf("wal: record too big, length does not fit in uint32: %d", recordLength) + } + padSize, padBytes := getPadBytes(recordLength) + if padSize != 0 { + payloadBuff = append(payloadBuff, padBytes...) + } + dataCRC := crc32.Update(w.crc, crcTable, payloadBuff) + header := uint64(recordLength) | (uint64(dataCRC) << 32) + + binary.LittleEndian.PutUint64(w.headerBuff, header) + nh, err := w.logFile.Write(w.headerBuff) + if err != nil { + return fmt.Errorf("wal: failed to write header bytes: %s", err) + } + + np, err := w.logFile.Write(payloadBuff) + if err != nil { + return fmt.Errorf("wal: failed to write payload bytes: %s", err) + } + + err = w.logFile.Sync() + if err != nil { + return fmt.Errorf("wal: failed to Sync log file: %s", err) + } + w.crc = dataCRC + + offset, err := w.logFile.Seek(0, io.SeekCurrent) + if err != nil { + return fmt.Errorf("wal: failed to get offset from log file: %s", err) + } + + if record.TruncateTo { + w.truncateIndex = w.index + } + w.logger.Debugf("LogRecord appended successfully: total size=%d, recordLength=%d, dataCRC=%08X; file=%s, new-offset=%d", + (nh + np), recordLength, dataCRC, w.logFile.Name(), offset) + + // Switch files if this or the next record (minimal 
size is 16B) cause overflow + if offset > w.options.FileSizeBytes-16 { + err = w.switchFiles() + if err != nil { + return fmt.Errorf("wal: failed to switch log files: %s", err) + } + } + + return nil +} + +// ReadAll the data items from the latest truncation point to the end of the log. +// This method can be called only at the beginning of the WAL lifecycle, right after Open(). +// After a successful invocation the WAL moves to write mode, and is ready to Append(). +// +// In case of failure: +// - an error of type io.ErrUnexpectedEOF is returned when the WAL can possibly be repaired by truncating the last +// log file after the last good record. +// - all other errors indicate that the WAL is either +// - is closed, or +// - is in write mode, or +// - is corrupted beyond the simple repair measure described above. +func (w *WriteAheadLogFile) ReadAll() ([][]byte, error) { + w.mutex.Lock() + defer w.mutex.Unlock() + + if w.dirFile == nil { + return nil, os.ErrClosed + } + + if !w.readMode { + return nil, ErrWriteOnly + } + + var items = make([][]byte, 0) + var lastIndex = w.activeIndexes[len(w.activeIndexes)-1] + +FileLoop: + for i, index := range w.activeIndexes { + w.index = index + // This should not fail, we check the files earlier, when we Open() the WAL. 
+ r, err := NewLogRecordReader(w.logger, filepath.Join(w.dirName, fmt.Sprintf(walFileTemplate, w.index))) + if err != nil { + return nil, err + } + if (i != 0) && (r.CRC() != w.crc) { + return nil, ErrCRC + } + + var readErr error + ReadLoop: + for i := 1; ; i++ { + var rec *protos.LogRecord + rec, readErr = r.Read() + if readErr != nil { + w.logger.Debugf("Read error, file: %s; error: %s", r.fileName, readErr) + r.Close() + break ReadLoop + } + + if rec.TruncateTo { + items = items[0:0] + w.truncateIndex = w.index + } + + if rec.Type == protos.LogRecord_ENTRY { + items = append(items, rec.Data) + } + + w.logger.Debugf("Read record #%d, file: %s", i, r.fileName) + } + + if readErr == io.EOF { + w.logger.Debugf("Reached EOF, finished reading file: %s; CRC: %08X", r.fileName, r.CRC()) + w.crc = r.CRC() + continue FileLoop + } + + if index == lastIndex && (readErr == io.ErrUnexpectedEOF || readErr == ErrCRC) { + w.logger.Warnf("Received an error in the last file, this can possibly be repaired; file: %s; error: %s", r.fileName, readErr) + // This error is returned when the WAL can possibly be repaired + return nil, io.ErrUnexpectedEOF + } + + if readErr != nil { + w.logger.Warnf("Failed reading file: %s; error: %s", r.fileName, readErr) + return nil, fmt.Errorf("failed reading wal: %s", readErr) + } + } + + w.logger.Debugf("Read %d items", len(items)) + + // move to write mode on a new file. + if err := w.recycleOrCreateFile(); err != nil { + w.logger.Errorf("Failed to move to a new file: %s", err) + return nil, err + } + w.readMode = false + + w.logger.Infof("Write-Ahead-Log read %d entries, mode: WRITE", len(items)) + return items, nil +} + +// truncateAndCloseLogFile when we orderly close a writable log file we truncate it. +// This way, reading it in ReadAll() ends with a io.EOF error after the last record. 
+func (w *WriteAheadLogFile) truncateAndCloseLogFile() error { + if w.readMode { + if err := w.logFile.Close(); err != nil { + w.logger.Errorf("Failed to close log file: %s; error: %s", w.logFile.Name(), err) + return err + } + w.logger.Debugf("Closed log file: %s", w.logFile.Name()) + return nil + } + + offset, err := w.logFile.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + + if err = truncateCloseFile(w.logFile, offset); err != nil { + return err + } + + w.logger.Debugf("Truncated, Sync'ed & Closed log file: %s", w.logFile.Name()) + return nil +} + +func (w *WriteAheadLogFile) switchFiles() error { + var err error + + w.logger.Debugf("Number of files: %d, active indexes: %v, truncation index: %d", + len(w.activeIndexes), w.activeIndexes, w.truncateIndex) + + if w.readMode { + return ErrReadOnly + } + + if err = w.truncateAndCloseLogFile(); err != nil { + w.logger.Errorf("Failed to truncateAndCloseLogFile: %s", err) + return err + } + + err = w.recycleOrCreateFile() + if err != nil { + return err + } + + w.logger.Debugf("Successfully switched to log file: %s", w.logFile.Name()) + w.logger.Debugf("Number of files: %d, active indexes: %v, truncation index: %d", + len(w.activeIndexes), w.activeIndexes, w.truncateIndex) + + return nil +} + +func (w *WriteAheadLogFile) recycleOrCreateFile() error { + var err error + + w.index++ + nextFileName := fmt.Sprintf(walFileTemplate, w.index) + nextFilePath := filepath.Join(w.dirFile.Name(), nextFileName) + w.logger.Debugf("Preparing next log file: %s", nextFilePath) + + if w.activeIndexes[0] < w.truncateIndex { + recycleFileName := fmt.Sprintf(walFileTemplate, w.activeIndexes[0]) + recycleFilePath := filepath.Join(w.dirFile.Name(), recycleFileName) + w.logger.Debugf("Recycling log file: %s to %s", recycleFileName, nextFileName) + + if err = renameResetWalFile(recycleFilePath, nextFilePath); err != nil { + return err + } + + w.logFile, err = os.OpenFile(nextFilePath, os.O_WRONLY, walFilePermPrivateRW) + if err != 
nil { + return err + } + + indexes := w.activeIndexes[1:] + w.activeIndexes = append(make([]uint64, 0, len(indexes)), indexes...) + } else { + w.logger.Debugf("Creating log file: %s", nextFileName) + w.logFile, err = os.OpenFile(nextFilePath, os.O_CREATE|os.O_WRONLY, walFilePermPrivateRW) + if err != nil { + return err + } + } + + if _, err = w.logFile.Seek(0, io.SeekStart); err != nil { + return err + } + + if err = w.saveCRC(); err != nil { + return err + } + + w.activeIndexes = append(w.activeIndexes, w.index) + + return nil +} + +// saveCRC saves the current CRC followed by a CRC_ANCHOR record. +func (w *WriteAheadLogFile) saveCRC() error { + anchorRecord := &protos.LogRecord{Type: protos.LogRecord_CRC_ANCHOR, TruncateTo: false} + b, err := proto.Marshal(anchorRecord) + if err != nil { + return err + } + recordLength := len(b) + padSize, padBytes := getPadBytes(recordLength) + if padSize != 0 { + b = append(b, padBytes...) + } + + header := uint64(recordLength) | (uint64(w.crc) << 32) + binary.LittleEndian.PutUint64(w.headerBuff, header) + offset, err := w.logFile.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + nh, err := w.logFile.Write(w.headerBuff) + if err != nil { + return fmt.Errorf("wal: failed to write crc-anchor header bytes: %s", err) + } + + nb, err := w.logFile.Write(b) + if err != nil { + return fmt.Errorf("wal: failed to write crc-anchor payload bytes: %s", err) + } + err = w.logFile.Sync() + if err != nil { + return fmt.Errorf("wal: failed to Sync: %s", err) + } + + w.logger.Debugf("CRC-Anchor %08X written to file: %s, at offset %d, size=%d", w.crc, w.logFile.Name(), offset, nh+nb) + + return nil +} + +func InitializeAndReadAll(logger api.Logger, walDir string, options *Options) (writeAheadLog *WriteAheadLogFile, initialState [][]byte, err error) { + logger.Infof("Trying to creating a Write-Ahead-Log at dir: %s", walDir) + logger.Debugf("Write-Ahead-Log options: %s", options) + + writeAheadLog, err = Create(logger, walDir, options) + 
if err != nil {
+		if err != ErrWALAlreadyExists {
+			err = errors.Wrap(err, "Cannot create Write-Ahead-Log")
+			return nil, nil, err
+		}
+
+		logger.Infof("Write-Ahead-Log already exists at dir: %s; Trying to open", walDir)
+		writeAheadLog, err = Open(logger, walDir, options)
+		if err != nil {
+			err = errors.Wrap(err, "Cannot open Write-Ahead-Log")
+			return nil, nil, err
+		}
+
+		initialState, err = writeAheadLog.ReadAll()
+		if err != nil {
+			// Only a truncated/corrupt tail (io.ErrUnexpectedEOF) is repairable.
+			if err != io.ErrUnexpectedEOF {
+				err = errors.Wrap(err, "Cannot read initial state from Write-Ahead-Log")
+				return nil, nil, err
+			}
+
+			logger.Infof("Received io.ErrUnexpectedEOF, trying to repair Write-Ahead-Log at dir: %s", walDir)
+			err = Repair(logger, walDir)
+			if err != nil {
+				err = errors.Wrap(err, "Cannot repair Write-Ahead-Log")
+				return nil, nil, err
+			}
+
+			logger.Infof("Reading Write-Ahead-Log initial state after repair")
+			initialState, err = writeAheadLog.ReadAll()
+			if err != nil {
+				err = errors.Wrap(err, "Cannot read initial state from Write-Ahead-Log, after repair")
+				return nil, nil, err
+			}
+		}
+	}
+	logger.Infof("Write-Ahead-Log initialized successfully, initial state contains %d entries", len(initialState))
+
+	return writeAheadLog, initialState, err
+}
diff --git a/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.pb.go b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.pb.go
new file mode 100644
index 00000000000..c7e61588384
--- /dev/null
+++ b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.pb.go
@@ -0,0 +1,229 @@
+// Copyright IBM Corp. All Rights Reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.26.0-devel +// protoc v3.12.3 +// source: smartbftprotos/logrecord.proto + +package smartbftprotos + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LogRecord_Type int32 + +const ( + LogRecord_ENTRY LogRecord_Type = 0 // Contains data + LogRecord_CONTROL LogRecord_Type = 1 // Does not contain data, only control fields + LogRecord_CRC_ANCHOR LogRecord_Type = 2 // A CRC anchor point +) + +// Enum value maps for LogRecord_Type. +var ( + LogRecord_Type_name = map[int32]string{ + 0: "ENTRY", + 1: "CONTROL", + 2: "CRC_ANCHOR", + } + LogRecord_Type_value = map[string]int32{ + "ENTRY": 0, + "CONTROL": 1, + "CRC_ANCHOR": 2, + } +) + +func (x LogRecord_Type) Enum() *LogRecord_Type { + p := new(LogRecord_Type) + *p = x + return p +} + +func (x LogRecord_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LogRecord_Type) Descriptor() protoreflect.EnumDescriptor { + return file_smartbftprotos_logrecord_proto_enumTypes[0].Descriptor() +} + +func (LogRecord_Type) Type() protoreflect.EnumType { + return &file_smartbftprotos_logrecord_proto_enumTypes[0] +} + +func (x LogRecord_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LogRecord_Type.Descriptor instead. 
+func (LogRecord_Type) EnumDescriptor() ([]byte, []int) { + return file_smartbftprotos_logrecord_proto_rawDescGZIP(), []int{0, 0} +} + +type LogRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type LogRecord_Type `protobuf:"varint,1,opt,name=type,proto3,enum=smartbftprotos.LogRecord_Type" json:"type,omitempty"` + TruncateTo bool `protobuf:"varint,2,opt,name=truncate_to,json=truncateTo,proto3" json:"truncate_to,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *LogRecord) Reset() { + *x = LogRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_logrecord_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogRecord) ProtoMessage() {} + +func (x *LogRecord) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_logrecord_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogRecord.ProtoReflect.Descriptor instead. 
+func (*LogRecord) Descriptor() ([]byte, []int) { + return file_smartbftprotos_logrecord_proto_rawDescGZIP(), []int{0} +} + +func (x *LogRecord) GetType() LogRecord_Type { + if x != nil { + return x.Type + } + return LogRecord_ENTRY +} + +func (x *LogRecord) GetTruncateTo() bool { + if x != nil { + return x.TruncateTo + } + return false +} + +func (x *LogRecord) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_smartbftprotos_logrecord_proto protoreflect.FileDescriptor + +var file_smartbftprotos_logrecord_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2f, 0x6c, 0x6f, 0x67, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x22, 0xa4, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x32, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x73, + 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, + 0x65, 0x54, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, + 0x4e, 0x54, 0x52, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x52, 0x43, 0x5f, 0x41, + 0x4e, 0x43, 0x48, 0x4f, 0x52, 0x10, 0x02, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6d, 0x61, 
0x72, 0x74, 0x42, 0x46, 0x54, 0x2d, 0x47, + 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x73, 0x6d, 0x61, 0x72, + 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_smartbftprotos_logrecord_proto_rawDescOnce sync.Once + file_smartbftprotos_logrecord_proto_rawDescData = file_smartbftprotos_logrecord_proto_rawDesc +) + +func file_smartbftprotos_logrecord_proto_rawDescGZIP() []byte { + file_smartbftprotos_logrecord_proto_rawDescOnce.Do(func() { + file_smartbftprotos_logrecord_proto_rawDescData = protoimpl.X.CompressGZIP(file_smartbftprotos_logrecord_proto_rawDescData) + }) + return file_smartbftprotos_logrecord_proto_rawDescData +} + +var file_smartbftprotos_logrecord_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_smartbftprotos_logrecord_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_smartbftprotos_logrecord_proto_goTypes = []interface{}{ + (LogRecord_Type)(0), // 0: smartbftprotos.LogRecord.Type + (*LogRecord)(nil), // 1: smartbftprotos.LogRecord +} +var file_smartbftprotos_logrecord_proto_depIdxs = []int32{ + 0, // 0: smartbftprotos.LogRecord.type:type_name -> smartbftprotos.LogRecord.Type + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_smartbftprotos_logrecord_proto_init() } +func file_smartbftprotos_logrecord_proto_init() { + if File_smartbftprotos_logrecord_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_smartbftprotos_logrecord_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + 
out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_smartbftprotos_logrecord_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_smartbftprotos_logrecord_proto_goTypes, + DependencyIndexes: file_smartbftprotos_logrecord_proto_depIdxs, + EnumInfos: file_smartbftprotos_logrecord_proto_enumTypes, + MessageInfos: file_smartbftprotos_logrecord_proto_msgTypes, + }.Build() + File_smartbftprotos_logrecord_proto = out.File + file_smartbftprotos_logrecord_proto_rawDesc = nil + file_smartbftprotos_logrecord_proto_goTypes = nil + file_smartbftprotos_logrecord_proto_depIdxs = nil +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.proto b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.proto new file mode 100644 index 00000000000..f3b9ca6f361 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/logrecord.proto @@ -0,0 +1,24 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + +option go_package = "github.com/SmartBFT-Go/consensus/smartbftprotos"; + +package smartbftprotos; + + +message LogRecord { + + enum Type { + ENTRY = 0; // Contains data + CONTROL = 1; // Does not contain data, only control fields + CRC_ANCHOR = 2; // A CRC anchor point + } + + Type type = 1; + bool truncate_to = 2; + bytes data = 3; +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.pb.go b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.pb.go new file mode 100644 index 00000000000..98f79ff1890 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.pb.go @@ -0,0 +1,1814 @@ +// Copyright IBM Corp. All Rights Reserved. +// +// SPDX-License-Identifier: Apache-2.0 +// + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0-devel +// protoc v3.12.3 +// source: smartbftprotos/messages.proto + +package smartbftprotos + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Content: + // *Message_PrePrepare + // *Message_Prepare + // *Message_Commit + // *Message_ViewChange + // *Message_ViewData + // *Message_NewView + // *Message_HeartBeat + // *Message_HeartBeatResponse + // *Message_StateTransferRequest + // *Message_StateTransferResponse + Content isMessage_Content `protobuf_oneof:"content"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{0} +} + +func (m *Message) GetContent() isMessage_Content { + if m != nil { + return m.Content + } + return nil +} + +func (x *Message) GetPrePrepare() *PrePrepare { + if x, ok := x.GetContent().(*Message_PrePrepare); ok { + return x.PrePrepare + } + return nil +} + +func (x *Message) GetPrepare() *Prepare { + if x, ok := x.GetContent().(*Message_Prepare); ok { + return x.Prepare + } + return nil +} + +func (x *Message) GetCommit() *Commit { + if x, ok := x.GetContent().(*Message_Commit); ok { + return x.Commit + } + return nil +} + +func (x *Message) GetViewChange() *ViewChange { + if x, ok := x.GetContent().(*Message_ViewChange); ok { + return x.ViewChange + } + return nil +} + +func (x *Message) GetViewData() *SignedViewData { + if x, ok := x.GetContent().(*Message_ViewData); ok { + return x.ViewData + } + return nil +} + +func (x *Message) GetNewView() *NewView { + if x, ok := x.GetContent().(*Message_NewView); ok { + return x.NewView + } + return nil +} + +func (x *Message) GetHeartBeat() *HeartBeat { + if x, ok := x.GetContent().(*Message_HeartBeat); ok { + return x.HeartBeat + } + return nil +} + +func (x *Message) GetHeartBeatResponse() *HeartBeatResponse { + if x, ok := x.GetContent().(*Message_HeartBeatResponse); ok { + return x.HeartBeatResponse + } + return nil +} + +func (x *Message) GetStateTransferRequest() *StateTransferRequest { + if x, ok := x.GetContent().(*Message_StateTransferRequest); ok { + return x.StateTransferRequest + } + return nil +} + +func (x *Message) GetStateTransferResponse() *StateTransferResponse { + if x, ok := x.GetContent().(*Message_StateTransferResponse); ok { + return x.StateTransferResponse + } + return nil +} + +type isMessage_Content interface { + isMessage_Content() +} + +type Message_PrePrepare struct { + PrePrepare *PrePrepare `protobuf:"bytes,1,opt,name=pre_prepare,json=prePrepare,proto3,oneof"` +} + +type 
Message_Prepare struct { + Prepare *Prepare `protobuf:"bytes,2,opt,name=prepare,proto3,oneof"` +} + +type Message_Commit struct { + Commit *Commit `protobuf:"bytes,3,opt,name=commit,proto3,oneof"` +} + +type Message_ViewChange struct { + ViewChange *ViewChange `protobuf:"bytes,4,opt,name=view_change,json=viewChange,proto3,oneof"` +} + +type Message_ViewData struct { + ViewData *SignedViewData `protobuf:"bytes,5,opt,name=view_data,json=viewData,proto3,oneof"` +} + +type Message_NewView struct { + NewView *NewView `protobuf:"bytes,6,opt,name=new_view,json=newView,proto3,oneof"` +} + +type Message_HeartBeat struct { + HeartBeat *HeartBeat `protobuf:"bytes,7,opt,name=heart_beat,json=heartBeat,proto3,oneof"` +} + +type Message_HeartBeatResponse struct { + HeartBeatResponse *HeartBeatResponse `protobuf:"bytes,8,opt,name=heart_beat_response,json=heartBeatResponse,proto3,oneof"` +} + +type Message_StateTransferRequest struct { + StateTransferRequest *StateTransferRequest `protobuf:"bytes,9,opt,name=state_transfer_request,json=stateTransferRequest,proto3,oneof"` +} + +type Message_StateTransferResponse struct { + StateTransferResponse *StateTransferResponse `protobuf:"bytes,10,opt,name=state_transfer_response,json=stateTransferResponse,proto3,oneof"` +} + +func (*Message_PrePrepare) isMessage_Content() {} + +func (*Message_Prepare) isMessage_Content() {} + +func (*Message_Commit) isMessage_Content() {} + +func (*Message_ViewChange) isMessage_Content() {} + +func (*Message_ViewData) isMessage_Content() {} + +func (*Message_NewView) isMessage_Content() {} + +func (*Message_HeartBeat) isMessage_Content() {} + +func (*Message_HeartBeatResponse) isMessage_Content() {} + +func (*Message_StateTransferRequest) isMessage_Content() {} + +func (*Message_StateTransferResponse) isMessage_Content() {} + +type PrePrepare struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + View uint64 `protobuf:"varint,1,opt,name=view,proto3" 
json:"view,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"` + Proposal *Proposal `protobuf:"bytes,3,opt,name=proposal,proto3" json:"proposal,omitempty"` + PrevCommitSignatures []*Signature `protobuf:"bytes,4,rep,name=prev_commit_signatures,json=prevCommitSignatures,proto3" json:"prev_commit_signatures,omitempty"` +} + +func (x *PrePrepare) Reset() { + *x = PrePrepare{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrePrepare) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrePrepare) ProtoMessage() {} + +func (x *PrePrepare) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrePrepare.ProtoReflect.Descriptor instead. 
+func (*PrePrepare) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{1} +} + +func (x *PrePrepare) GetView() uint64 { + if x != nil { + return x.View + } + return 0 +} + +func (x *PrePrepare) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *PrePrepare) GetProposal() *Proposal { + if x != nil { + return x.Proposal + } + return nil +} + +func (x *PrePrepare) GetPrevCommitSignatures() []*Signature { + if x != nil { + return x.PrevCommitSignatures + } + return nil +} + +type Prepare struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + View uint64 `protobuf:"varint,1,opt,name=view,proto3" json:"view,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"` + Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + Assist bool `protobuf:"varint,4,opt,name=assist,proto3" json:"assist,omitempty"` +} + +func (x *Prepare) Reset() { + *x = Prepare{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Prepare) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Prepare) ProtoMessage() {} + +func (x *Prepare) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Prepare.ProtoReflect.Descriptor instead. 
+func (*Prepare) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{2} +} + +func (x *Prepare) GetView() uint64 { + if x != nil { + return x.View + } + return 0 +} + +func (x *Prepare) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *Prepare) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *Prepare) GetAssist() bool { + if x != nil { + return x.Assist + } + return false +} + +type ProposedRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PrePrepare *PrePrepare `protobuf:"bytes,1,opt,name=pre_prepare,json=prePrepare,proto3" json:"pre_prepare,omitempty"` + Prepare *Prepare `protobuf:"bytes,2,opt,name=prepare,proto3" json:"prepare,omitempty"` +} + +func (x *ProposedRecord) Reset() { + *x = ProposedRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProposedRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProposedRecord) ProtoMessage() {} + +func (x *ProposedRecord) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProposedRecord.ProtoReflect.Descriptor instead. 
+func (*ProposedRecord) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{3} +} + +func (x *ProposedRecord) GetPrePrepare() *PrePrepare { + if x != nil { + return x.PrePrepare + } + return nil +} + +func (x *ProposedRecord) GetPrepare() *Prepare { + if x != nil { + return x.Prepare + } + return nil +} + +type Commit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + View uint64 `protobuf:"varint,1,opt,name=view,proto3" json:"view,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"` + Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + Signature *Signature `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + Assist bool `protobuf:"varint,5,opt,name=assist,proto3" json:"assist,omitempty"` +} + +func (x *Commit) Reset() { + *x = Commit{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Commit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Commit) ProtoMessage() {} + +func (x *Commit) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Commit.ProtoReflect.Descriptor instead. 
+func (*Commit) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{4} +} + +func (x *Commit) GetView() uint64 { + if x != nil { + return x.View + } + return 0 +} + +func (x *Commit) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +func (x *Commit) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *Commit) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +func (x *Commit) GetAssist() bool { + if x != nil { + return x.Assist + } + return false +} + +type PreparesFrom struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []uint64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *PreparesFrom) Reset() { + *x = PreparesFrom{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreparesFrom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreparesFrom) ProtoMessage() {} + +func (x *PreparesFrom) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreparesFrom.ProtoReflect.Descriptor instead. 
+func (*PreparesFrom) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{5} +} + +func (x *PreparesFrom) GetIds() []uint64 { + if x != nil { + return x.Ids + } + return nil +} + +type ViewChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NextView uint64 `protobuf:"varint,1,opt,name=next_view,json=nextView,proto3" json:"next_view,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *ViewChange) Reset() { + *x = ViewChange{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ViewChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ViewChange) ProtoMessage() {} + +func (x *ViewChange) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ViewChange.ProtoReflect.Descriptor instead. 
+func (*ViewChange) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{6} +} + +func (x *ViewChange) GetNextView() uint64 { + if x != nil { + return x.NextView + } + return 0 +} + +func (x *ViewChange) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type ViewData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NextView uint64 `protobuf:"varint,1,opt,name=next_view,json=nextView,proto3" json:"next_view,omitempty"` + LastDecision *Proposal `protobuf:"bytes,2,opt,name=last_decision,json=lastDecision,proto3" json:"last_decision,omitempty"` + LastDecisionSignatures []*Signature `protobuf:"bytes,3,rep,name=last_decision_signatures,json=lastDecisionSignatures,proto3" json:"last_decision_signatures,omitempty"` + InFlightProposal *Proposal `protobuf:"bytes,4,opt,name=in_flight_proposal,json=inFlightProposal,proto3" json:"in_flight_proposal,omitempty"` + InFlightPrepared bool `protobuf:"varint,5,opt,name=in_flight_prepared,json=inFlightPrepared,proto3" json:"in_flight_prepared,omitempty"` +} + +func (x *ViewData) Reset() { + *x = ViewData{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ViewData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ViewData) ProtoMessage() {} + +func (x *ViewData) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ViewData.ProtoReflect.Descriptor instead. 
+func (*ViewData) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{7} +} + +func (x *ViewData) GetNextView() uint64 { + if x != nil { + return x.NextView + } + return 0 +} + +func (x *ViewData) GetLastDecision() *Proposal { + if x != nil { + return x.LastDecision + } + return nil +} + +func (x *ViewData) GetLastDecisionSignatures() []*Signature { + if x != nil { + return x.LastDecisionSignatures + } + return nil +} + +func (x *ViewData) GetInFlightProposal() *Proposal { + if x != nil { + return x.InFlightProposal + } + return nil +} + +func (x *ViewData) GetInFlightPrepared() bool { + if x != nil { + return x.InFlightPrepared + } + return false +} + +type SignedViewData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RawViewData []byte `protobuf:"bytes,1,opt,name=raw_view_data,json=rawViewData,proto3" json:"raw_view_data,omitempty"` + Signer uint64 `protobuf:"varint,2,opt,name=signer,proto3" json:"signer,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SignedViewData) Reset() { + *x = SignedViewData{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignedViewData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignedViewData) ProtoMessage() {} + +func (x *SignedViewData) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignedViewData.ProtoReflect.Descriptor instead. 
+func (*SignedViewData) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{8} +} + +func (x *SignedViewData) GetRawViewData() []byte { + if x != nil { + return x.RawViewData + } + return nil +} + +func (x *SignedViewData) GetSigner() uint64 { + if x != nil { + return x.Signer + } + return 0 +} + +func (x *SignedViewData) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type NewView struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SignedViewData []*SignedViewData `protobuf:"bytes,2,rep,name=signed_view_data,json=signedViewData,proto3" json:"signed_view_data,omitempty"` +} + +func (x *NewView) Reset() { + *x = NewView{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewView) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewView) ProtoMessage() {} + +func (x *NewView) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewView.ProtoReflect.Descriptor instead. 
+func (*NewView) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{9} +} + +func (x *NewView) GetSignedViewData() []*SignedViewData { + if x != nil { + return x.SignedViewData + } + return nil +} + +type HeartBeat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + View uint64 `protobuf:"varint,1,opt,name=view,proto3" json:"view,omitempty"` + Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"` +} + +func (x *HeartBeat) Reset() { + *x = HeartBeat{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeartBeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartBeat) ProtoMessage() {} + +func (x *HeartBeat) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartBeat.ProtoReflect.Descriptor instead. 
+func (*HeartBeat) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{10} +} + +func (x *HeartBeat) GetView() uint64 { + if x != nil { + return x.View + } + return 0 +} + +func (x *HeartBeat) GetSeq() uint64 { + if x != nil { + return x.Seq + } + return 0 +} + +type HeartBeatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + View uint64 `protobuf:"varint,1,opt,name=view,proto3" json:"view,omitempty"` +} + +func (x *HeartBeatResponse) Reset() { + *x = HeartBeatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeartBeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartBeatResponse) ProtoMessage() {} + +func (x *HeartBeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartBeatResponse.ProtoReflect.Descriptor instead. 
+func (*HeartBeatResponse) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{11} +} + +func (x *HeartBeatResponse) GetView() uint64 { + if x != nil { + return x.View + } + return 0 +} + +type Signature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Signer uint64 `protobuf:"varint,1,opt,name=signer,proto3" json:"signer,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Msg []byte `protobuf:"bytes,3,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (x *Signature) Reset() { + *x = Signature{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. 
+func (*Signature) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{12} +} + +func (x *Signature) GetSigner() uint64 { + if x != nil { + return x.Signer + } + return 0 +} + +func (x *Signature) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *Signature) GetMsg() []byte { + if x != nil { + return x.Msg + } + return nil +} + +type Proposal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header []byte `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + Metadata []byte `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + VerificationSequence uint64 `protobuf:"varint,4,opt,name=verification_sequence,json=verificationSequence,proto3" json:"verification_sequence,omitempty"` +} + +func (x *Proposal) Reset() { + *x = Proposal{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Proposal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Proposal) ProtoMessage() {} + +func (x *Proposal) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Proposal.ProtoReflect.Descriptor instead. 
+func (*Proposal) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{13} +} + +func (x *Proposal) GetHeader() []byte { + if x != nil { + return x.Header + } + return nil +} + +func (x *Proposal) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Proposal) GetMetadata() []byte { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Proposal) GetVerificationSequence() uint64 { + if x != nil { + return x.VerificationSequence + } + return 0 +} + +type ViewMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ViewId uint64 `protobuf:"varint,1,opt,name=view_id,json=viewId,proto3" json:"view_id,omitempty"` + LatestSequence uint64 `protobuf:"varint,2,opt,name=latest_sequence,json=latestSequence,proto3" json:"latest_sequence,omitempty"` + DecisionsInView uint64 `protobuf:"varint,3,opt,name=decisions_in_view,json=decisionsInView,proto3" json:"decisions_in_view,omitempty"` + BlackList []uint64 `protobuf:"varint,4,rep,packed,name=black_list,json=blackList,proto3" json:"black_list,omitempty"` + PrevCommitSignatureDigest []byte `protobuf:"bytes,5,opt,name=prev_commit_signature_digest,json=prevCommitSignatureDigest,proto3" json:"prev_commit_signature_digest,omitempty"` +} + +func (x *ViewMetadata) Reset() { + *x = ViewMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ViewMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ViewMetadata) ProtoMessage() {} + +func (x *ViewMetadata) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ViewMetadata.ProtoReflect.Descriptor instead. +func (*ViewMetadata) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{14} +} + +func (x *ViewMetadata) GetViewId() uint64 { + if x != nil { + return x.ViewId + } + return 0 +} + +func (x *ViewMetadata) GetLatestSequence() uint64 { + if x != nil { + return x.LatestSequence + } + return 0 +} + +func (x *ViewMetadata) GetDecisionsInView() uint64 { + if x != nil { + return x.DecisionsInView + } + return 0 +} + +func (x *ViewMetadata) GetBlackList() []uint64 { + if x != nil { + return x.BlackList + } + return nil +} + +func (x *ViewMetadata) GetPrevCommitSignatureDigest() []byte { + if x != nil { + return x.PrevCommitSignatureDigest + } + return nil +} + +type SavedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Content: + // *SavedMessage_ProposedRecord + // *SavedMessage_Commit + // *SavedMessage_NewView + // *SavedMessage_ViewChange + Content isSavedMessage_Content `protobuf_oneof:"content"` +} + +func (x *SavedMessage) Reset() { + *x = SavedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SavedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SavedMessage) ProtoMessage() {} + +func (x *SavedMessage) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SavedMessage.ProtoReflect.Descriptor instead. 
+func (*SavedMessage) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{15} +} + +func (m *SavedMessage) GetContent() isSavedMessage_Content { + if m != nil { + return m.Content + } + return nil +} + +func (x *SavedMessage) GetProposedRecord() *ProposedRecord { + if x, ok := x.GetContent().(*SavedMessage_ProposedRecord); ok { + return x.ProposedRecord + } + return nil +} + +func (x *SavedMessage) GetCommit() *Message { + if x, ok := x.GetContent().(*SavedMessage_Commit); ok { + return x.Commit + } + return nil +} + +func (x *SavedMessage) GetNewView() *ViewMetadata { + if x, ok := x.GetContent().(*SavedMessage_NewView); ok { + return x.NewView + } + return nil +} + +func (x *SavedMessage) GetViewChange() *ViewChange { + if x, ok := x.GetContent().(*SavedMessage_ViewChange); ok { + return x.ViewChange + } + return nil +} + +type isSavedMessage_Content interface { + isSavedMessage_Content() +} + +type SavedMessage_ProposedRecord struct { + ProposedRecord *ProposedRecord `protobuf:"bytes,1,opt,name=proposed_record,json=proposedRecord,proto3,oneof"` +} + +type SavedMessage_Commit struct { + Commit *Message `protobuf:"bytes,2,opt,name=commit,proto3,oneof"` +} + +type SavedMessage_NewView struct { + NewView *ViewMetadata `protobuf:"bytes,3,opt,name=new_view,json=newView,proto3,oneof"` +} + +type SavedMessage_ViewChange struct { + ViewChange *ViewChange `protobuf:"bytes,4,opt,name=view_change,json=viewChange,proto3,oneof"` +} + +func (*SavedMessage_ProposedRecord) isSavedMessage_Content() {} + +func (*SavedMessage_Commit) isSavedMessage_Content() {} + +func (*SavedMessage_NewView) isSavedMessage_Content() {} + +func (*SavedMessage_ViewChange) isSavedMessage_Content() {} + +type StateTransferRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StateTransferRequest) Reset() { + *x = StateTransferRequest{} + if protoimpl.UnsafeEnabled { + mi := 
&file_smartbftprotos_messages_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateTransferRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateTransferRequest) ProtoMessage() {} + +func (x *StateTransferRequest) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateTransferRequest.ProtoReflect.Descriptor instead. +func (*StateTransferRequest) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{16} +} + +type StateTransferResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ViewNum uint64 `protobuf:"varint,1,opt,name=view_num,json=viewNum,proto3" json:"view_num,omitempty"` + Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (x *StateTransferResponse) Reset() { + *x = StateTransferResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_smartbftprotos_messages_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateTransferResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateTransferResponse) ProtoMessage() {} + +func (x *StateTransferResponse) ProtoReflect() protoreflect.Message { + mi := &file_smartbftprotos_messages_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateTransferResponse.ProtoReflect.Descriptor instead. 
+func (*StateTransferResponse) Descriptor() ([]byte, []int) { + return file_smartbftprotos_messages_proto_rawDescGZIP(), []int{17} +} + +func (x *StateTransferResponse) GetViewNum() uint64 { + if x != nil { + return x.ViewNum + } + return 0 +} + +func (x *StateTransferResponse) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +var File_smartbftprotos_messages_proto protoreflect.FileDescriptor + +var file_smartbftprotos_messages_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x22, + 0xbe, 0x05, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x70, + 0x72, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x2e, 0x50, 0x72, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x48, 0x00, 0x52, 0x0a, + 0x70, 0x72, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x70, 0x72, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x6d, + 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x72, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, + 0x30, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x12, 0x3d, 0x0a, 0x0b, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, + 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x3d, 0x0a, 0x09, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, 0x44, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x76, 0x69, 0x65, 0x77, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x34, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x2e, 0x4e, 0x65, 0x77, 0x56, 0x69, 0x65, 0x77, 0x48, 0x00, 0x52, 0x07, 0x6e, 0x65, + 0x77, 0x56, 0x69, 0x65, 0x77, 0x12, 0x3a, 0x0a, 0x0a, 0x68, 0x65, 0x61, 0x72, 0x74, 0x5f, 0x62, + 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x6d, 0x61, 0x72, + 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x42, 0x65, 0x61, 0x74, 0x48, 0x00, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, + 0x74, 0x12, 0x53, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x72, 0x74, 0x5f, 0x62, 0x65, 0x61, 0x74, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x48, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, + 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x14, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x5f, 0x0a, 0x17, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x15, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0xb9, 0x01, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x76, + 0x69, 0x65, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x03, 0x73, 0x65, 0x71, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, + 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, + 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x4f, 0x0a, 0x16, 0x70, + 0x72, 0x65, 0x76, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x6d, + 0x61, 0x72, 0x74, 
0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x14, 0x70, 0x72, 0x65, 0x76, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x5f, 0x0a, 0x07, + 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x73, + 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, 0x71, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x22, 0x80, 0x01, + 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x12, 0x3b, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x72, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x31, 0x0a, + 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, + 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x22, 0x97, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x76, + 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, + 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, + 0x71, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 
0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, + 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x50, 0x72, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0a, + 0x56, 0x69, 0x65, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, + 0x65, 0x78, 0x74, 0x56, 0x69, 0x65, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, + 0xb1, 0x02, 0x0a, 0x08, 0x56, 0x69, 0x65, 0x77, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x6e, 0x65, 0x78, 0x74, 0x56, 0x69, 0x65, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, + 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x18, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 
0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x6d, 0x61, + 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x46, 0x0a, + 0x12, 0x69, 0x6e, 0x5f, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x6d, 0x61, 0x72, + 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x61, 0x6c, 0x52, 0x10, 0x69, 0x6e, 0x46, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x50, 0x72, 0x6f, + 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x5f, 0x66, 0x6c, 0x69, 0x67, + 0x68, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x69, 0x6e, 0x46, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x64, 0x22, 0x6a, 0x0a, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x69, 0x65, + 0x77, 0x44, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x61, 0x77, 0x5f, 0x76, 0x69, 0x65, + 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x61, + 0x77, 0x56, 0x69, 0x65, 0x77, 0x44, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, + 0x53, 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x56, 0x69, 0x65, 0x77, 0x12, 0x48, 0x0a, 0x10, 0x73, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x1e, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x69, 0x65, 0x77, + 0x44, 0x61, 0x74, 0x61, 0x22, 0x31, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, 0x71, 0x22, 0x27, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x42, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x76, 0x69, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, + 0x22, 0x4b, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x73, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x8d, 0x01, + 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x15, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 
0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0xdc, 0x01, + 0x0a, 0x0c, 0x56, 0x69, 0x65, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, + 0x0a, 0x07, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x76, 0x69, 0x65, 0x77, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0e, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x69, 0x6e, + 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x64, 0x65, 0x63, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x1d, 0x0a, 0x0a, + 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, + 0x52, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, + 0x72, 0x65, 0x76, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x91, 0x02, 0x0a, + 0x0c, 0x53, 0x61, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x49, 0x0a, + 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, + 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, + 0x52, 
0x65, 0x63, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, + 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x6d, 0x61, 0x72, 0x74, + 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x6e, + 0x65, 0x77, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x73, 0x6d, 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x56, + 0x69, 0x65, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6e, + 0x65, 0x77, 0x56, 0x69, 0x65, 0x77, 0x12, 0x3d, 0x0a, 0x0b, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x6d, + 0x61, 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x2e, 0x56, 0x69, 0x65, + 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0x16, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x69, 0x65, 0x77, 0x4e, 0x75, 0x6d, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x53, 0x6d, 0x61, 0x72, 0x74, 0x42, 0x46, 0x54, 0x2d, + 0x47, 0x6f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x73, 0x6d, 0x61, + 0x72, 0x74, 0x62, 0x66, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_smartbftprotos_messages_proto_rawDescOnce sync.Once + file_smartbftprotos_messages_proto_rawDescData = file_smartbftprotos_messages_proto_rawDesc +) + +func file_smartbftprotos_messages_proto_rawDescGZIP() []byte { + file_smartbftprotos_messages_proto_rawDescOnce.Do(func() { + file_smartbftprotos_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_smartbftprotos_messages_proto_rawDescData) + }) + return file_smartbftprotos_messages_proto_rawDescData +} + +var file_smartbftprotos_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_smartbftprotos_messages_proto_goTypes = []interface{}{ + (*Message)(nil), // 0: smartbftprotos.Message + (*PrePrepare)(nil), // 1: smartbftprotos.PrePrepare + (*Prepare)(nil), // 2: smartbftprotos.Prepare + (*ProposedRecord)(nil), // 3: smartbftprotos.ProposedRecord + (*Commit)(nil), // 4: smartbftprotos.Commit + (*PreparesFrom)(nil), // 5: smartbftprotos.PreparesFrom + (*ViewChange)(nil), // 6: smartbftprotos.ViewChange + (*ViewData)(nil), // 7: smartbftprotos.ViewData + (*SignedViewData)(nil), // 8: smartbftprotos.SignedViewData + (*NewView)(nil), // 9: smartbftprotos.NewView + (*HeartBeat)(nil), // 10: smartbftprotos.HeartBeat + (*HeartBeatResponse)(nil), // 11: smartbftprotos.HeartBeatResponse + (*Signature)(nil), // 12: smartbftprotos.Signature + (*Proposal)(nil), // 13: smartbftprotos.Proposal + (*ViewMetadata)(nil), // 14: smartbftprotos.ViewMetadata + (*SavedMessage)(nil), // 15: smartbftprotos.SavedMessage + (*StateTransferRequest)(nil), // 16: smartbftprotos.StateTransferRequest + (*StateTransferResponse)(nil), // 17: smartbftprotos.StateTransferResponse +} +var file_smartbftprotos_messages_proto_depIdxs = []int32{ + 1, 
// 0: smartbftprotos.Message.pre_prepare:type_name -> smartbftprotos.PrePrepare + 2, // 1: smartbftprotos.Message.prepare:type_name -> smartbftprotos.Prepare + 4, // 2: smartbftprotos.Message.commit:type_name -> smartbftprotos.Commit + 6, // 3: smartbftprotos.Message.view_change:type_name -> smartbftprotos.ViewChange + 8, // 4: smartbftprotos.Message.view_data:type_name -> smartbftprotos.SignedViewData + 9, // 5: smartbftprotos.Message.new_view:type_name -> smartbftprotos.NewView + 10, // 6: smartbftprotos.Message.heart_beat:type_name -> smartbftprotos.HeartBeat + 11, // 7: smartbftprotos.Message.heart_beat_response:type_name -> smartbftprotos.HeartBeatResponse + 16, // 8: smartbftprotos.Message.state_transfer_request:type_name -> smartbftprotos.StateTransferRequest + 17, // 9: smartbftprotos.Message.state_transfer_response:type_name -> smartbftprotos.StateTransferResponse + 13, // 10: smartbftprotos.PrePrepare.proposal:type_name -> smartbftprotos.Proposal + 12, // 11: smartbftprotos.PrePrepare.prev_commit_signatures:type_name -> smartbftprotos.Signature + 1, // 12: smartbftprotos.ProposedRecord.pre_prepare:type_name -> smartbftprotos.PrePrepare + 2, // 13: smartbftprotos.ProposedRecord.prepare:type_name -> smartbftprotos.Prepare + 12, // 14: smartbftprotos.Commit.signature:type_name -> smartbftprotos.Signature + 13, // 15: smartbftprotos.ViewData.last_decision:type_name -> smartbftprotos.Proposal + 12, // 16: smartbftprotos.ViewData.last_decision_signatures:type_name -> smartbftprotos.Signature + 13, // 17: smartbftprotos.ViewData.in_flight_proposal:type_name -> smartbftprotos.Proposal + 8, // 18: smartbftprotos.NewView.signed_view_data:type_name -> smartbftprotos.SignedViewData + 3, // 19: smartbftprotos.SavedMessage.proposed_record:type_name -> smartbftprotos.ProposedRecord + 0, // 20: smartbftprotos.SavedMessage.commit:type_name -> smartbftprotos.Message + 14, // 21: smartbftprotos.SavedMessage.new_view:type_name -> smartbftprotos.ViewMetadata + 6, // 22: 
smartbftprotos.SavedMessage.view_change:type_name -> smartbftprotos.ViewChange + 23, // [23:23] is the sub-list for method output_type + 23, // [23:23] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name +} + +func init() { file_smartbftprotos_messages_proto_init() } +func file_smartbftprotos_messages_proto_init() { + if File_smartbftprotos_messages_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_smartbftprotos_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrePrepare); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Prepare); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProposedRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Commit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*PreparesFrom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignedViewData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NewView); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeartBeat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeartBeatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Signature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_smartbftprotos_messages_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Proposal); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SavedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateTransferRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_smartbftprotos_messages_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateTransferResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_smartbftprotos_messages_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Message_PrePrepare)(nil), + (*Message_Prepare)(nil), + (*Message_Commit)(nil), + (*Message_ViewChange)(nil), + (*Message_ViewData)(nil), + (*Message_NewView)(nil), + (*Message_HeartBeat)(nil), + (*Message_HeartBeatResponse)(nil), + (*Message_StateTransferRequest)(nil), + (*Message_StateTransferResponse)(nil), + } + file_smartbftprotos_messages_proto_msgTypes[15].OneofWrappers = []interface{}{ + (*SavedMessage_ProposedRecord)(nil), + (*SavedMessage_Commit)(nil), + (*SavedMessage_NewView)(nil), + (*SavedMessage_ViewChange)(nil), + } + type x struct{} + 
out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_smartbftprotos_messages_proto_rawDesc, + NumEnums: 0, + NumMessages: 18, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_smartbftprotos_messages_proto_goTypes, + DependencyIndexes: file_smartbftprotos_messages_proto_depIdxs, + MessageInfos: file_smartbftprotos_messages_proto_msgTypes, + }.Build() + File_smartbftprotos_messages_proto = out.File + file_smartbftprotos_messages_proto_rawDesc = nil + file_smartbftprotos_messages_proto_goTypes = nil + file_smartbftprotos_messages_proto_depIdxs = nil +} diff --git a/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.proto b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.proto new file mode 100644 index 00000000000..4e6c754d6c1 --- /dev/null +++ b/vendor/github.com/SmartBFT-Go/consensus/smartbftprotos/messages.proto @@ -0,0 +1,129 @@ +// Copyright IBM Corp. All Rights Reserved. 
+// +// SPDX-License-Identifier: Apache-2.0 +// + +syntax = "proto3"; + + +option go_package = "github.com/SmartBFT-Go/consensus/smartbftprotos"; + + +package smartbftprotos; + +message Message { + oneof content { + PrePrepare pre_prepare = 1; + Prepare prepare = 2; + Commit commit = 3; + ViewChange view_change = 4; + SignedViewData view_data = 5; + NewView new_view = 6; + HeartBeat heart_beat = 7; + HeartBeatResponse heart_beat_response = 8; + StateTransferRequest state_transfer_request = 9; + StateTransferResponse state_transfer_response = 10; + } +} + +message PrePrepare { + uint64 view = 1; + uint64 seq = 2; + Proposal proposal = 3; + repeated Signature prev_commit_signatures = 4; +} + +message Prepare { + uint64 view = 1; + uint64 seq = 2; + string digest = 3; + bool assist = 4; +} + +message ProposedRecord { + PrePrepare pre_prepare = 1; + Prepare prepare = 2; +} + +message Commit { + uint64 view = 1; + uint64 seq = 2; + string digest = 3; + Signature signature = 4; + bool assist = 5; +} + +message PreparesFrom { + repeated uint64 ids = 1; +} + +message ViewChange { + uint64 next_view = 1; + string reason = 2; +} + +message ViewData { + uint64 next_view = 1; + Proposal last_decision = 2; + repeated Signature last_decision_signatures = 3; + Proposal in_flight_proposal = 4; + bool in_flight_prepared = 5; +} + +message SignedViewData { + bytes raw_view_data = 1; + uint64 signer = 2; + bytes signature = 3; +} + +message NewView { + repeated SignedViewData signed_view_data = 2; +} + +message HeartBeat { + uint64 view = 1; + uint64 seq = 2; +} + +message HeartBeatResponse { + uint64 view = 1; +} + +message Signature { + uint64 signer = 1; + bytes value = 2; + bytes msg = 3; +} + +message Proposal { + bytes header = 1; + bytes payload = 2; + bytes metadata = 3; + uint64 verification_sequence = 4; +} + +message ViewMetadata { + uint64 view_id = 1; + uint64 latest_sequence = 2; + uint64 decisions_in_view = 3; + repeated uint64 black_list = 4; + bytes 
prev_commit_signature_digest = 5; +} + +message SavedMessage { + oneof content { + ProposedRecord proposed_record = 1; + Message commit = 2; + ViewMetadata new_view = 3; + ViewChange view_change = 4; + } +} + +message StateTransferRequest { + +} + +message StateTransferResponse { + uint64 view_num = 1; + uint64 sequence = 2; +} \ No newline at end of file diff --git a/vendor/github.com/hyperledger/fabric-protos-go/orderer/smartbft/configuration.pb.go b/vendor/github.com/hyperledger/fabric-protos-go/orderer/smartbft/configuration.pb.go new file mode 100644 index 00000000000..e34db04081f --- /dev/null +++ b/vendor/github.com/hyperledger/fabric-protos-go/orderer/smartbft/configuration.pb.go @@ -0,0 +1,278 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: orderer/smartbft/configuration.proto + +package smartbft + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Options_Rotation int32 + +const ( + Options_ROTATION_UNSPECIFIED Options_Rotation = 0 + Options_ROTATION_OFF Options_Rotation = 1 + Options_ROTATION_ON Options_Rotation = 2 +) + +var Options_Rotation_name = map[int32]string{ + 0: "ROTATION_UNSPECIFIED", + 1: "ROTATION_OFF", + 2: "ROTATION_ON", +} + +var Options_Rotation_value = map[string]int32{ + "ROTATION_UNSPECIFIED": 0, + "ROTATION_OFF": 1, + "ROTATION_ON": 2, +} + +func (x Options_Rotation) String() string { + return proto.EnumName(Options_Rotation_name, int32(x)) +} + +func (Options_Rotation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a8a81ac5a2771ff3, []int{0, 0} +} + +// Options to be specified for all the smartbft nodes. These can be modified on a +// per-channel basis. +type Options struct { + RequestBatchMaxCount uint64 `protobuf:"varint,1,opt,name=request_batch_max_count,json=requestBatchMaxCount,proto3" json:"request_batch_max_count,omitempty"` + RequestBatchMaxBytes uint64 `protobuf:"varint,2,opt,name=request_batch_max_bytes,json=requestBatchMaxBytes,proto3" json:"request_batch_max_bytes,omitempty"` + RequestBatchMaxInterval string `protobuf:"bytes,3,opt,name=request_batch_max_interval,json=requestBatchMaxInterval,proto3" json:"request_batch_max_interval,omitempty"` + IncomingMessageBufferSize uint64 `protobuf:"varint,4,opt,name=incoming_message_buffer_size,json=incomingMessageBufferSize,proto3" json:"incoming_message_buffer_size,omitempty"` + RequestPoolSize uint64 `protobuf:"varint,5,opt,name=request_pool_size,json=requestPoolSize,proto3" json:"request_pool_size,omitempty"` + RequestForwardTimeout string `protobuf:"bytes,6,opt,name=request_forward_timeout,json=requestForwardTimeout,proto3" json:"request_forward_timeout,omitempty"` + RequestComplainTimeout string `protobuf:"bytes,7,opt,name=request_complain_timeout,json=requestComplainTimeout,proto3" json:"request_complain_timeout,omitempty"` + 
RequestAutoRemoveTimeout string `protobuf:"bytes,8,opt,name=request_auto_remove_timeout,json=requestAutoRemoveTimeout,proto3" json:"request_auto_remove_timeout,omitempty"` + RequestMaxBytes uint64 `protobuf:"varint,9,opt,name=request_max_bytes,json=requestMaxBytes,proto3" json:"request_max_bytes,omitempty"` + ViewChangeResendInterval string `protobuf:"bytes,10,opt,name=view_change_resend_interval,json=viewChangeResendInterval,proto3" json:"view_change_resend_interval,omitempty"` + ViewChangeTimeout string `protobuf:"bytes,11,opt,name=view_change_timeout,json=viewChangeTimeout,proto3" json:"view_change_timeout,omitempty"` + LeaderHeartbeatTimeout string `protobuf:"bytes,12,opt,name=leader_heartbeat_timeout,json=leaderHeartbeatTimeout,proto3" json:"leader_heartbeat_timeout,omitempty"` + LeaderHeartbeatCount uint64 `protobuf:"varint,13,opt,name=leader_heartbeat_count,json=leaderHeartbeatCount,proto3" json:"leader_heartbeat_count,omitempty"` + CollectTimeout string `protobuf:"bytes,14,opt,name=collect_timeout,json=collectTimeout,proto3" json:"collect_timeout,omitempty"` + SyncOnStart bool `protobuf:"varint,15,opt,name=sync_on_start,json=syncOnStart,proto3" json:"sync_on_start,omitempty"` + SpeedUpViewChange bool `protobuf:"varint,16,opt,name=speed_up_view_change,json=speedUpViewChange,proto3" json:"speed_up_view_change,omitempty"` + LeaderRotation Options_Rotation `protobuf:"varint,17,opt,name=leader_rotation,json=leaderRotation,proto3,enum=orderer.smartbft.Options_Rotation" json:"leader_rotation,omitempty"` + DecisionsPerLeader uint64 `protobuf:"varint,18,opt,name=decisions_per_leader,json=decisionsPerLeader,proto3" json:"decisions_per_leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Options) Reset() { *m = Options{} } +func (m *Options) String() string { return proto.CompactTextString(m) } +func (*Options) ProtoMessage() {} +func (*Options) Descriptor() ([]byte, []int) { 
+ return fileDescriptor_a8a81ac5a2771ff3, []int{0} +} + +func (m *Options) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Options.Unmarshal(m, b) +} +func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Options.Marshal(b, m, deterministic) +} +func (m *Options) XXX_Merge(src proto.Message) { + xxx_messageInfo_Options.Merge(m, src) +} +func (m *Options) XXX_Size() int { + return xxx_messageInfo_Options.Size(m) +} +func (m *Options) XXX_DiscardUnknown() { + xxx_messageInfo_Options.DiscardUnknown(m) +} + +var xxx_messageInfo_Options proto.InternalMessageInfo + +func (m *Options) GetRequestBatchMaxCount() uint64 { + if m != nil { + return m.RequestBatchMaxCount + } + return 0 +} + +func (m *Options) GetRequestBatchMaxBytes() uint64 { + if m != nil { + return m.RequestBatchMaxBytes + } + return 0 +} + +func (m *Options) GetRequestBatchMaxInterval() string { + if m != nil { + return m.RequestBatchMaxInterval + } + return "" +} + +func (m *Options) GetIncomingMessageBufferSize() uint64 { + if m != nil { + return m.IncomingMessageBufferSize + } + return 0 +} + +func (m *Options) GetRequestPoolSize() uint64 { + if m != nil { + return m.RequestPoolSize + } + return 0 +} + +func (m *Options) GetRequestForwardTimeout() string { + if m != nil { + return m.RequestForwardTimeout + } + return "" +} + +func (m *Options) GetRequestComplainTimeout() string { + if m != nil { + return m.RequestComplainTimeout + } + return "" +} + +func (m *Options) GetRequestAutoRemoveTimeout() string { + if m != nil { + return m.RequestAutoRemoveTimeout + } + return "" +} + +func (m *Options) GetRequestMaxBytes() uint64 { + if m != nil { + return m.RequestMaxBytes + } + return 0 +} + +func (m *Options) GetViewChangeResendInterval() string { + if m != nil { + return m.ViewChangeResendInterval + } + return "" +} + +func (m *Options) GetViewChangeTimeout() string { + if m != nil { + return m.ViewChangeTimeout + } + return "" +} + +func (m 
*Options) GetLeaderHeartbeatTimeout() string { + if m != nil { + return m.LeaderHeartbeatTimeout + } + return "" +} + +func (m *Options) GetLeaderHeartbeatCount() uint64 { + if m != nil { + return m.LeaderHeartbeatCount + } + return 0 +} + +func (m *Options) GetCollectTimeout() string { + if m != nil { + return m.CollectTimeout + } + return "" +} + +func (m *Options) GetSyncOnStart() bool { + if m != nil { + return m.SyncOnStart + } + return false +} + +func (m *Options) GetSpeedUpViewChange() bool { + if m != nil { + return m.SpeedUpViewChange + } + return false +} + +func (m *Options) GetLeaderRotation() Options_Rotation { + if m != nil { + return m.LeaderRotation + } + return Options_ROTATION_UNSPECIFIED +} + +func (m *Options) GetDecisionsPerLeader() uint64 { + if m != nil { + return m.DecisionsPerLeader + } + return 0 +} + +func init() { + proto.RegisterEnum("orderer.smartbft.Options_Rotation", Options_Rotation_name, Options_Rotation_value) + proto.RegisterType((*Options)(nil), "orderer.smartbft.Options") +} + +func init() { + proto.RegisterFile("orderer/smartbft/configuration.proto", fileDescriptor_a8a81ac5a2771ff3) +} + +var fileDescriptor_a8a81ac5a2771ff3 = []byte{ + // 615 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x94, 0x5d, 0x6f, 0xd3, 0x30, + 0x14, 0x86, 0xe9, 0x18, 0x5b, 0xe7, 0x6d, 0xfd, 0x08, 0x65, 0x0b, 0x8c, 0x8b, 0xaa, 0x42, 0xa2, + 0xe2, 0x22, 0x41, 0xc0, 0x10, 0x12, 0x9a, 0xd0, 0x5a, 0x56, 0xa8, 0x60, 0xeb, 0x94, 0x6d, 0x5c, + 0x70, 0x63, 0x39, 0xc9, 0x69, 0x6a, 0x29, 0x89, 0x83, 0xed, 0xec, 0xeb, 0xef, 0xf0, 0x47, 0x51, + 0xec, 0x38, 0x2d, 0xed, 0x2e, 0xe3, 0xe7, 0x7d, 0x72, 0x7c, 0xe2, 0xe3, 0xa0, 0x57, 0x8c, 0x87, + 0xc0, 0x81, 0xbb, 0x22, 0x21, 0x5c, 0xfa, 0x53, 0xe9, 0x06, 0x2c, 0x9d, 0xd2, 0x28, 0xe7, 0x44, + 0x52, 0x96, 0x3a, 0x19, 0x67, 0x92, 0x59, 0xad, 0x32, 0xe5, 0x98, 0x54, 0xef, 0x6f, 0x1d, 0x6d, + 0x4e, 0xb2, 0x22, 0x22, 0xac, 0x43, 0xb4, 0xcf, 0xe1, 0x4f, 0x0e, 0x42, 
0x62, 0x9f, 0xc8, 0x60, + 0x86, 0x13, 0x72, 0x8b, 0x03, 0x96, 0xa7, 0xd2, 0xae, 0x75, 0x6b, 0xfd, 0x75, 0xaf, 0x53, 0xe2, + 0x41, 0x41, 0x4f, 0xc9, 0xed, 0xb0, 0x60, 0x0f, 0x6b, 0xfe, 0x9d, 0x04, 0x61, 0xaf, 0x3d, 0xa8, + 0x0d, 0x0a, 0x66, 0x7d, 0x46, 0x2f, 0x56, 0x35, 0x9a, 0x4a, 0xe0, 0xd7, 0x24, 0xb6, 0x1f, 0x77, + 0x6b, 0xfd, 0x2d, 0x6f, 0x7f, 0xc9, 0x1c, 0x97, 0xd8, 0xfa, 0x82, 0x5e, 0xd2, 0x34, 0x60, 0x09, + 0x4d, 0x23, 0x9c, 0x80, 0x10, 0x24, 0x02, 0xec, 0xe7, 0xd3, 0x29, 0x70, 0x2c, 0xe8, 0x3d, 0xd8, + 0xeb, 0xaa, 0xf0, 0x73, 0x93, 0x39, 0xd5, 0x91, 0x81, 0x4a, 0x5c, 0xd0, 0x7b, 0xb0, 0xde, 0xa0, + 0xb6, 0xa9, 0x9e, 0x31, 0x16, 0x6b, 0xeb, 0x89, 0xb2, 0x9a, 0x25, 0x38, 0x67, 0x2c, 0x56, 0xd9, + 0x8f, 0xf3, 0x06, 0xa7, 0x8c, 0xdf, 0x10, 0x1e, 0x62, 0x49, 0x13, 0x60, 0xb9, 0xb4, 0x37, 0xd4, + 0x36, 0x9f, 0x95, 0x78, 0xa4, 0xe9, 0xa5, 0x86, 0xd6, 0x27, 0x64, 0x1b, 0x2f, 0x60, 0x49, 0x16, + 0x13, 0x9a, 0x56, 0xe2, 0xa6, 0x12, 0xf7, 0x4a, 0x3e, 0x2c, 0xb1, 0x31, 0x8f, 0xd0, 0x81, 0x31, + 0x49, 0x2e, 0x19, 0xe6, 0x90, 0xb0, 0x6b, 0xa8, 0xe4, 0xba, 0x92, 0xcd, 0xcb, 0x8f, 0x73, 0xc9, + 0x3c, 0x15, 0x30, 0xfa, 0x42, 0x73, 0xf3, 0xb3, 0xd8, 0xfa, 0xaf, 0xb9, 0xea, 0x18, 0x8e, 0xd0, + 0xc1, 0x35, 0x85, 0x1b, 0x1c, 0xcc, 0x48, 0x1a, 0x01, 0xe6, 0x20, 0x20, 0x0d, 0xe7, 0xe7, 0x80, + 0x74, 0xa9, 0x22, 0x32, 0x54, 0x09, 0x4f, 0x05, 0xaa, 0x83, 0x70, 0xd0, 0xd3, 0x45, 0xdd, 0xec, + 0x70, 0x5b, 0x69, 0xed, 0xb9, 0xb6, 0xf0, 0x4d, 0x62, 0x20, 0x21, 0x70, 0x3c, 0x83, 0x62, 0x04, + 0x81, 0xc8, 0x4a, 0xda, 0xd1, 0xdf, 0x44, 0xf3, 0xef, 0x06, 0x1b, 0xf3, 0x03, 0xda, 0x5b, 0x31, + 0xf5, 0x70, 0xee, 0xea, 0x29, 0x5b, 0xf2, 0xf4, 0x70, 0xbe, 0x46, 0xcd, 0x80, 0xc5, 0x31, 0x04, + 0xf3, 0x32, 0x0d, 0x55, 0xa6, 0x51, 0x2e, 0x9b, 0xd7, 0xf7, 0xd0, 0xae, 0xb8, 0x4b, 0x03, 0xcc, + 0x52, 0x2c, 0x24, 0xe1, 0xd2, 0x6e, 0x76, 0x6b, 0xfd, 0xba, 0xb7, 0x5d, 0x2c, 0x4e, 0xd2, 0x8b, + 0x62, 0xc9, 0x72, 0x51, 0x47, 0x64, 0x00, 0x21, 0xce, 0x33, 0xbc, 0xd0, 0xb5, 0xdd, 0x52, 0xd1, + 0xb6, 0x62, 
0x57, 0xd9, 0xaf, 0xaa, 0x69, 0xeb, 0x07, 0x6a, 0x96, 0x7b, 0xe6, 0x4c, 0xaa, 0x8b, + 0x68, 0xb7, 0xbb, 0xb5, 0x7e, 0xe3, 0x5d, 0xcf, 0x59, 0xbe, 0x89, 0x4e, 0x79, 0x0b, 0x1d, 0xaf, + 0x4c, 0x7a, 0x0d, 0xad, 0x9a, 0x67, 0xeb, 0x2d, 0xea, 0x84, 0x10, 0x50, 0x51, 0xa4, 0x70, 0x06, + 0x1c, 0x6b, 0x6e, 0x5b, 0xaa, 0x7d, 0xab, 0x62, 0xe7, 0xc0, 0x7f, 0x2a, 0xd2, 0xfb, 0x86, 0xea, + 0x95, 0x6d, 0xa3, 0x8e, 0x37, 0xb9, 0x3c, 0xbe, 0x1c, 0x4f, 0xce, 0xf0, 0xd5, 0xd9, 0xc5, 0xf9, + 0xc9, 0x70, 0x3c, 0x1a, 0x9f, 0x7c, 0x6d, 0x3d, 0xb2, 0x5a, 0x68, 0xa7, 0x22, 0x93, 0xd1, 0xa8, + 0x55, 0xb3, 0x9a, 0x68, 0x7b, 0xbe, 0x72, 0xd6, 0x5a, 0x1b, 0x44, 0xc8, 0x61, 0x3c, 0x72, 0x66, + 0x77, 0x19, 0xf0, 0x18, 0xc2, 0x08, 0xb8, 0x33, 0x25, 0x3e, 0xa7, 0x81, 0xfe, 0xaf, 0x88, 0x95, + 0x6e, 0x7e, 0x1f, 0x46, 0x54, 0xce, 0x72, 0xdf, 0x09, 0x58, 0xe2, 0x2e, 0x68, 0xae, 0xd6, 0x5c, + 0xad, 0xb9, 0xcb, 0x3f, 0x2d, 0x7f, 0x43, 0x81, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x01, + 0x59, 0x5f, 0x15, 0xcf, 0x04, 0x00, 0x00, +} diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 3b99bf0ac84..fdfef8808ab 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,5 +1,49 @@ # Changelog +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. 
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + ## 1.17.0 (25 May 2021) Bugfixes: diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 3f4b86e081f..9e929cd98e6 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -106,6 +106,24 @@ func (b *Buffer) Write(bs []byte) (int, error) { return len(bs), nil } +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + // TrimNewline trims any final "\n" byte from the end of the buffer. 
func (b *Buffer) TrimNewline() { if i := len(b.bs) - 1; i >= 0 { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 553f258e74a..f116bd936fe 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -26,7 +26,6 @@ import ( "os" "runtime" "strings" - "time" "go.uber.org/zap/zapcore" ) @@ -51,6 +50,8 @@ type Logger struct { addStack zapcore.LevelEnabler callerSkip int + + clock zapcore.Clock } // New constructs a new Logger from the provided zapcore.Core and Options. If @@ -71,6 +72,7 @@ func New(core zapcore.Core, options ...Option) *Logger { core: core, errorOutput: zapcore.Lock(os.Stderr), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } return log.WithOptions(options...) } @@ -85,6 +87,7 @@ func NewNop() *Logger { core: zapcore.NewNopCore(), errorOutput: zapcore.AddSync(ioutil.Discard), addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, } } @@ -270,7 +273,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // log message will actually be written somewhere. 
ent := zapcore.Entry{ LoggerName: log.name, - Time: time.Now(), + Time: log.clock.Now(), Level: lvl, Message: msg, } @@ -307,7 +310,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { if log.addCaller { frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) if !defined { - fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index 0135c20923f..e9e66161f51 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -138,3 +138,11 @@ func OnFatal(action zapcore.CheckWriteAction) Option { log.onFatal = action }) } + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 4084dada79c..0b9651981a9 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -266,7 +266,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // Make sure this element isn't a dangling key. if i == len(args)-1 { - s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) break } @@ -287,7 +287,7 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { // If we encountered any invalid key-value pairs, log an error. 
if len(invalid) > 0 { - s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) } return fields } diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 00000000000..ef2f7d9637b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,188 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. 
+ _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. + Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. 
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. 
+ if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 00000000000..d2ea95b394b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" +) + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. 
+type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 4aa8b4f90bd..0885505b75b 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -208,7 +208,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // If the entry is dirty, log an internal error; because the // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. - fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) ce.ErrorOutput.Sync() } return @@ -219,11 +219,9 @@ func (ce *CheckedEntry) Write(fields ...Field) { for i := range ce.cores { err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) } - if ce.ErrorOutput != nil { - if err != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) - ce.ErrorOutput.Sync() - } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() } should, msg := ce.should, ce.Message diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index f2a07d78641..74919b0ccb1 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -83,7 +83,7 @@ type errorGroup interface { Errors() []error } -// Note that errArry and errArrayElem are very similar to the version +// Note that errArray and errArrayElem are very similar to the version // implemented in the top-level error.go file. We can't re-use this because // that would require exporting errArray as part of the zapcore API. 
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index 25f10ca1d75..31ed96e129f 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -197,12 +197,14 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { return ce } - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) } - s.hook(ent, LogSampled) return s.Core.Check(ent, ce) } diff --git a/vendor/go.uber.org/zap/zaptest/observer/observer.go b/vendor/go.uber.org/zap/zaptest/observer/observer.go index 6ae58f5d6a6..03866bd91b2 100644 --- a/vendor/go.uber.org/zap/zaptest/observer/observer.go +++ b/vendor/go.uber.org/zap/zaptest/observer/observer.go @@ -19,7 +19,7 @@ // THE SOFTWARE. // Package observer provides a zapcore.Core that keeps an in-memory, -// encoding-agnostic repesentation of log entries. It's useful for +// encoding-agnostic representation of log entries. It's useful for // applications that want to unit test their log output without tying their // tests to a particular output encoding. package observer // import "go.uber.org/zap/zaptest/observer" @@ -78,23 +78,30 @@ func (o *ObservedLogs) AllUntimed() []LoggedEntry { return ret } +// FilterLevelExact filters entries to those logged at exactly the given level. +func (o *ObservedLogs) FilterLevelExact(level zapcore.Level) *ObservedLogs { + return o.Filter(func(e LoggedEntry) bool { + return e.Level == level + }) +} + // FilterMessage filters entries to those that have the specified message. 
func (o *ObservedLogs) FilterMessage(msg string) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { return e.Message == msg }) } // FilterMessageSnippet filters entries to those that have a message containing the specified snippet. func (o *ObservedLogs) FilterMessageSnippet(snippet string) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { return strings.Contains(e.Message, snippet) }) } // FilterField filters entries to those that have the specified field. func (o *ObservedLogs) FilterField(field zapcore.Field) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { for _, ctxField := range e.Context { if ctxField.Equals(field) { return true @@ -106,7 +113,7 @@ func (o *ObservedLogs) FilterField(field zapcore.Field) *ObservedLogs { // FilterFieldKey filters entries to those that have the specified key. func (o *ObservedLogs) FilterFieldKey(key string) *ObservedLogs { - return o.filter(func(e LoggedEntry) bool { + return o.Filter(func(e LoggedEntry) bool { for _, ctxField := range e.Context { if ctxField.Key == key { return true @@ -116,13 +123,15 @@ func (o *ObservedLogs) FilterFieldKey(key string) *ObservedLogs { }) } -func (o *ObservedLogs) filter(match func(LoggedEntry) bool) *ObservedLogs { +// Filter returns a copy of this ObservedLogs containing only those entries +// for which the provided function returns true. 
+func (o *ObservedLogs) Filter(keep func(LoggedEntry) bool) *ObservedLogs { o.mu.RLock() defer o.mu.RUnlock() var filtered []LoggedEntry for _, entry := range o.logs { - if match(entry) { + if keep(entry) { filtered = append(filtered, entry) } } diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 00000000000..15167cd746c --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 00000000000..1c4577e9680 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000000..30f632c577b --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. 
+func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. 
+ // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0d6f34766e0..4bb998b0560 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -56,6 +56,14 @@ github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion +# github.com/SmartBFT-Go/consensus v0.3.0 +## explicit; go 1.16 +github.com/SmartBFT-Go/consensus/internal/bft +github.com/SmartBFT-Go/consensus/pkg/api +github.com/SmartBFT-Go/consensus/pkg/consensus +github.com/SmartBFT-Go/consensus/pkg/types +github.com/SmartBFT-Go/consensus/pkg/wal +github.com/SmartBFT-Go/consensus/smartbftprotos # github.com/VictoriaMetrics/fastcache v1.9.0 ## explicit; go 1.13 github.com/VictoriaMetrics/fastcache @@ -221,7 +229,7 @@ github.com/hyperledger/fabric-config/protolator/protoext/peerext # github.com/hyperledger/fabric-lib-go v1.0.0 ## explicit github.com/hyperledger/fabric-lib-go/healthz -# github.com/hyperledger/fabric-protos-go v0.0.0-20220827195505-ce4c067a561d +# github.com/hyperledger/fabric-protos-go v0.0.0-20221109160343-add83d6f2564 ## explicit; go 1.12 github.com/hyperledger/fabric-protos-go/common github.com/hyperledger/fabric-protos-go/discovery @@ -233,6 +241,7 @@ github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset github.com/hyperledger/fabric-protos-go/msp github.com/hyperledger/fabric-protos-go/orderer github.com/hyperledger/fabric-protos-go/orderer/etcdraft +github.com/hyperledger/fabric-protos-go/orderer/smartbft 
github.com/hyperledger/fabric-protos-go/peer github.com/hyperledger/fabric-protos-go/peer/lifecycle github.com/hyperledger/fabric-protos-go/transientstore @@ -454,7 +463,7 @@ go.uber.org/atomic # go.uber.org/multierr v1.6.0 ## explicit; go 1.12 go.uber.org/multierr -# go.uber.org/zap v1.17.0 +# go.uber.org/zap v1.19.0 ## explicit; go 1.13 go.uber.org/zap go.uber.org/zap/buffer @@ -481,6 +490,9 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +## explicit +golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a ## explicit; go 1.17 golang.org/x/sys/cpu