From 5b1ac91b5ce05da4f49818e59a10ecdeab55330c Mon Sep 17 00:00:00 2001 From: Yoav Tock Date: Wed, 15 Nov 2023 14:58:46 +0200 Subject: [PATCH] BFT Block Puller: BFTDeliverer (#4365) * BFT Block Puller: censorship monitor BFT header receivers that pull headers from the orderers, and keep the time and number of the last one. The header receivers verify each block as it arrives. The header receivers will receive (or ask for) full config blocks - in a later commit. The header receivers will maintain their own private block verifier (bundle) - in a later commit. A BFT censorship monitor which periodically compares the progress of headers relative to blocks. The monitor triggers an alert if a header is ahead of the block stream for more than a certain time. The BFTDeliverer is only a skeleton Signed-off-by: Yoav Tock Change-Id: I5180a98640073b87effb478fd86b6fa7d4df5dee * BFT Block Puller: BFTDeliverer - A BFTDeliverer that fetches blocks and maintains a BFTCensorshipMonitor. - Abstract the creation of a BFTCensorshipMonitor via an abstract factory, so that we can use a mock for it in testing. - Add a "shuffledEndpoints" method to the connection source and test it. - Unit testing of BFTDeliverer. Signed-off-by: Yoav Tock Change-Id: Ifead3f9e6c803c4d9fabc63acce11c6da472b88d --------- Signed-off-by: Yoav Tock --- .../blocksprovider/bft_censorship_monitor.go | 407 ++++++ .../bft_censorship_monitor_factory.go | 25 + .../bft_censorship_monitor_factory_test.go | 21 + .../bft_censorship_monitor_test.go | 770 ++++++++++++ .../pkg/peer/blocksprovider/bft_deliverer.go | 303 +++-- .../peer/blocksprovider/bft_deliverer_test.go | 1115 +++++++++++++++-- .../blocksprovider/bft_header_receiver.go | 209 +++ .../bft_header_receiver_test.go | 310 +++++ .../pkg/peer/blocksprovider/block_receiver.go | 4 +- internal/pkg/peer/blocksprovider/deliverer.go | 4 +- .../pkg/peer/blocksprovider/deliverer_test.go | 2 +- .../peer/blocksprovider/delivery_requester.go | 50 +- .../fake/block_progress_reporter.go | 108 ++ .../blocksprovider/fake/block_verifier.go | 14 +- .../fake/censorship_detector.go | 162 +++ .../fake/censorship_detector_factory.go | 129 ++ .../fake/deliver_client_requester.go | 205 +++ .../blocksprovider/fake/deliver_streamer.go | 7 +- .../pkg/peer/blocksprovider/fake/dialer.go | 7 +- .../fake/duration_exceeded_handler.go | 102 ++ .../fake/gossip_service_adapter.go | 10 +- .../peer/blocksprovider/fake/ledger_info.go | 7 +- .../fake/orderer_connection_source.go | 132 +- .../pkg/peer/blocksprovider/timeout_config.go | 36 + internal/pkg/peer/blocksprovider/util.go | 69 +- internal/pkg/peer/blocksprovider/util_test.go | 61 + internal/pkg/peer/orderers/connection.go | 15 + internal/pkg/peer/orderers/connection_test.go | 34 + 28 files changed, 4020 insertions(+), 298 deletions(-) create mode 100644 internal/pkg/peer/blocksprovider/bft_censorship_monitor.go create mode 100644 internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory.go create mode 100644 internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory_test.go create mode 100644 internal/pkg/peer/blocksprovider/bft_censorship_monitor_test.go create mode 100644 internal/pkg/peer/blocksprovider/bft_header_receiver.go create mode 100644 internal/pkg/peer/blocksprovider/bft_header_receiver_test.go create mode 100644 internal/pkg/peer/blocksprovider/fake/block_progress_reporter.go create mode 100644 internal/pkg/peer/blocksprovider/fake/censorship_detector.go create mode 100644 internal/pkg/peer/blocksprovider/fake/censorship_detector_factory.go create 
mode 100644 internal/pkg/peer/blocksprovider/fake/deliver_client_requester.go create mode 100644 internal/pkg/peer/blocksprovider/fake/duration_exceeded_handler.go create mode 100644 internal/pkg/peer/blocksprovider/timeout_config.go create mode 100644 internal/pkg/peer/blocksprovider/util_test.go diff --git a/internal/pkg/peer/blocksprovider/bft_censorship_monitor.go b/internal/pkg/peer/blocksprovider/bft_censorship_monitor.go new file mode 100644 index 00000000000..3ad302fbd21 --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_censorship_monitor.go @@ -0,0 +1,407 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider + +import ( + "container/list" + "fmt" + "strings" + "sync" + "time" + + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/internal/pkg/peer/orderers" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +// BlockProgressReporter provides information on the last block fetched from an orderer. + +//go:generate counterfeiter -o fake/block_progress_reporter.go --fake-name BlockProgressReporter . BlockProgressReporter +type BlockProgressReporter interface { + // BlockProgress returns the last block fetched from an orderer, and the time it was fetched. + // If the fetch time IsZero == true, no block has been fetched yet (the block number will always be zero in that case). + BlockProgress() (uint64, time.Time) +} + +// DeliverClientRequester connects to an orderer, requests a stream of blocks or headers, and provides a deliver client. + +//go:generate counterfeiter -o fake/deliver_client_requester.go --fake-name DeliverClientRequester . DeliverClientRequester +type DeliverClientRequester interface { + SeekInfoHeadersFrom(ledgerHeight uint64) (*common.Envelope, error) + Connect(seekInfoEnv *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) +} + +// BFTCensorshipMonitor monitors the progress of header receivers versus the progress of the block receiver. +// We ask for a stream of headers from all sources except the one supplying blocks. +// We track the progress of header receivers against the block reception progress. +// If there is a header that is ahead of the last block, and a timeout has passed since that header was received, we +// declare that censorship has been detected. +// When censorship is detected, ErrCensorship is sent to the errorCh, which can be read via the ErrorsChannel() method.
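A minimal usage sketch of the monitor lifecycle, roughly what the BFTDeliverer below ends up doing: run Monitor() on a dedicated goroutine, watch ErrorsChannel(), and call Stop() on shutdown. The chainID, verifier, requester, reporter, sources and doneC identifiers are placeholders assumed for the sketch, not names introduced by this patch.

// Sketch only: driving a BFTCensorshipMonitor from a caller's goroutine.
mon := NewBFTCensorshipMonitor(chainID, verifier, requester, reporter, sources, 0, TimeoutConfig{})
go mon.Monitor() // runs until Stop() is called or until it reports an error

select {
case err := <-mon.ErrorsChannel():
    switch err.(type) {
    case *ErrCensorship:
        // a suspicion matured into a censorship event: switch to another orderer and re-fetch
    case *ErrStopping:
        // the monitor was stopped; nothing to do
    default: // *ErrFatal or unexpected
        // non-recoverable; stop delivering blocks
    }
case <-doneC:
    mon.Stop()
}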
+type BFTCensorshipMonitor struct { + chainID string + headerVerifier BlockVerifier + requester DeliverClientRequester + fetchSources []*orderers.Endpoint + blockSourceIndex int + + timeoutConfig TimeoutConfig + stopHistoryWindowDur time.Duration + + progressReporter BlockProgressReporter // Provides the last block number and time + suspicion bool // If a suspicion is pending + suspicionTime time.Time // The reception time of the header that is ahead of the block receiver + suspicionBlockNumber uint64 // The block number of the header that is ahead of the block receiver + + mutex sync.Mutex + stopFlag bool + stopCh chan struct{} + errorCh chan error + hdrRcvTrackers map[string]*headerReceiverTracker + + logger *flogging.FabricLogger +} + +const logTimeFormat = "2006-01-02T15:04:05.000" + +type headerReceiverTracker struct { + headerReceiver *BFTHeaderReceiver + connectFailureCounter uint // the number of consecutive unsuccessful attempts to connect to an orderer + stopTimes *list.List // the sequence of times a receiver had stopped due to Recv() or verification errors + retryDeadline time.Time // do not try to connect & restart before the deadline +} + +func (tracker *headerReceiverTracker) pruneOlderThan(u time.Time) { + if tracker.stopTimes == nil { + tracker.stopTimes = list.New() + return + } + + for e := tracker.stopTimes.Front(); e != nil; e = e.Next() { + if s := e.Value.(time.Time); s.Before(u) { + tracker.stopTimes.Remove(e) + } else { + break + } + } +} + +func (tracker *headerReceiverTracker) appendIfNewer(u time.Time) bool { + if tracker.stopTimes == nil { + tracker.stopTimes = list.New() + } + + if tracker.stopTimes.Len() == 0 { + tracker.stopTimes.PushBack(u) + return true + } + + if e := tracker.stopTimes.Back().Value.(time.Time); e.Before(u) { + tracker.stopTimes.PushBack(u) + return true + } + + return false +} + +func NewBFTCensorshipMonitor( + chainID string, + verifier BlockVerifier, + requester DeliverClientRequester, + progressReporter BlockProgressReporter, + fetchSources []*orderers.Endpoint, + blockSourceIndex int, + timeoutConf TimeoutConfig, +) *BFTCensorshipMonitor { + timeoutConf.ApplyDefaults() + // This window is calculated such that if a receiver continuously fails when the retry interval was scaled up to + // MaxRetryInterval, the retry interval will stay at MaxRetryInterval. + stopWindowDur := time.Duration(int64(numRetries2Max(2.0, timeoutConf.MinRetryInterval, timeoutConf.MaxRetryInterval)+1) * timeoutConf.MaxRetryInterval.Nanoseconds()) + + m := &BFTCensorshipMonitor{ + chainID: chainID, + headerVerifier: verifier, + requester: requester, + fetchSources: fetchSources, + progressReporter: progressReporter, + stopCh: make(chan struct{}), + errorCh: make(chan error, 1), // Buffered to allow the Monitor() goroutine to exit without waiting + hdrRcvTrackers: make(map[string]*headerReceiverTracker), + blockSourceIndex: blockSourceIndex, + timeoutConfig: timeoutConf, + stopHistoryWindowDur: stopWindowDur, + logger: flogging.MustGetLogger("BFTCensorshipMonitor").With("channel", chainID), + } + + return m +} + +// Monitor the progress of headers and compare to the progress of block fetching, trying to detect block censorship. +// Continuously try and relaunch the goroutines that monitor individual orderers. If an orderer is faulty we increase +// the interval between retries but never quit. +// +// This method should be run using a dedicated goroutine. 
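For intuition on the retry behavior described above (retry intervals grow but retries never stop), here is a rough arithmetic sketch of the stop-history window computed in NewBFTCensorshipMonitor, using hypothetical values (MinRetryInterval = 100ms, MaxRetryInterval = 10s) and assuming numRetries2Max(2.0, min, max) returns the number of doublings needed to grow min to max:

// Hypothetical values; the usual "math" and "time" imports are assumed.
minRetry := 100 * time.Millisecond
maxRetry := 10 * time.Second
doublings := int(math.Ceil(math.Log2(float64(maxRetry) / float64(minRetry)))) // ≈ 7
window := time.Duration(doublings+1) * maxRetry                               // ≈ 80s
// A header receiver that keeps failing inside this ~80s window retains enough stop
// times to keep its backoff pinned at MaxRetryInterval, so retries become sparse
// but are never abandoned.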
+func (m *BFTCensorshipMonitor) Monitor() { + m.logger.Debug("Starting to monitor block and header fetching progress") + defer func() { + m.logger.Debug("Stopping to monitor block and header fetching progress") + }() + + for i, ep := range m.fetchSources { + if i == m.blockSourceIndex { + continue + } + m.hdrRcvTrackers[ep.Address] = &headerReceiverTracker{} + } + + for { + if err := m.launchHeaderReceivers(); err != nil { + m.logger.Warningf("Failure while launching header receivers: %s", err) + m.errorCh <- &ErrFatal{Message: err.Error()} + m.Stop() + return + } + + select { + case <-m.stopCh: + m.errorCh <- &ErrStopping{Message: "received a stop signal"} + return + case <-time.After(m.timeoutConfig.BlockCensorshipTimeout / 100): + if m.detectBlockCensorship() { + m.logger.Warningf("Block censorship detected, block source endpoint: %s", m.fetchSources[m.blockSourceIndex]) + m.errorCh <- &ErrCensorship{Message: fmt.Sprintf("block censorship detected, endpoint: %s", m.fetchSources[m.blockSourceIndex])} + m.Stop() + return + } + } + } +} + +func (m *BFTCensorshipMonitor) ErrorsChannel() <-chan error { + return m.errorCh +} + +func (m *BFTCensorshipMonitor) Stop() { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.stopFlag { + return + } + + m.stopFlag = true + close(m.stopCh) + for _, hRcvMon := range m.hdrRcvTrackers { + if hRcvMon.headerReceiver != nil { + _ = hRcvMon.headerReceiver.Stop() + } + } +} + +func (m *BFTCensorshipMonitor) detectBlockCensorship() bool { + m.mutex.Lock() + defer m.mutex.Unlock() + + now := time.Now() + lastBlockNumber, lastBlockTime := m.progressReporter.BlockProgress() + + // When there is a suspicion, we're waiting for one of two things: + // - either a new block arrives, with a number >= than the header that triggered the suspicion, + // in which case we remove the suspicion, or + // - the timeout elapses, in which case the suspicion is reported as a censorship event. + if m.suspicion { + if !lastBlockTime.IsZero() && lastBlockNumber >= m.suspicionBlockNumber { + m.logger.Debugf("[%s] last block number: %d >= than suspicion header number: %d; suspicion disproved", m.chainID, lastBlockNumber, m.suspicionBlockNumber) + m.suspicion = false + m.suspicionBlockNumber = 0 + m.suspicionTime = time.Time{} + } else if now.After(m.suspicionTime.Add(m.timeoutConfig.BlockCensorshipTimeout)) { + m.logger.Warningf("[%s] block censorship timeout (%s) expired; suspicion time: %s; last block number: %d < than suspicion header number: %d; censorship event detected", + m.chainID, m.timeoutConfig.BlockCensorshipTimeout, m.suspicionTime.Format(logTimeFormat), lastBlockNumber, m.suspicionBlockNumber) + m.suspicion = false + m.suspicionBlockNumber = 0 + m.suspicionTime = time.Time{} + return true + } + } + + if m.suspicion { + // When there is a pending suspicion, advancing headers cannot disprove it, only a new block. + // Thus, there is no point in checking the progress of headers now. 
+ m.logger.Debugf("[%s] suspicion pending: block number: %d, header number: %d, header time: %s; timeout expires in: %s", + m.chainID, lastBlockNumber, m.suspicionBlockNumber, m.suspicionTime.Format(logTimeFormat), m.suspicionTime.Add(m.timeoutConfig.BlockCensorshipTimeout).Sub(now)) + return false + } + + var ahead []timeNumber + for ep, hRcvMon := range m.hdrRcvTrackers { + if hRcvMon.headerReceiver == nil { + m.logger.Debugf("[%s] header receiver: %s is nil, skipping", m.chainID, ep) + continue + } + headerNum, headerTime, err := hRcvMon.headerReceiver.LastBlockNum() + if err != nil { + m.logger.Debugf("[%s] header receiver: %s, error getting last block number, skipping; err: %s", m.chainID, ep, err) + continue + } + if (!lastBlockTime.IsZero() && headerNum > lastBlockNumber) || lastBlockTime.IsZero() { + m.logger.Debugf("[%s] header receiver: %s, is ahead of block receiver; header num=%d, time=%s; block num=%d, time=%s", + m.chainID, ep, headerNum, headerTime.Format(logTimeFormat), lastBlockNumber, lastBlockTime.Format(logTimeFormat)) + ahead = append(ahead, timeNumber{ + t: headerTime, + n: headerNum, + }) + } + } + + if len(ahead) == 0 { + return false + } + + if m.logger.IsEnabledFor(zapcore.DebugLevel) { + var b strings.Builder + for _, tn := range ahead { + b.WriteString(fmt.Sprintf("(t: %s, n: %d); ", tn.t.Format(logTimeFormat), tn.n)) + } + m.logger.Debugf("[%s] %d header receivers are ahead of block receiver, out of %d endpoints; ahead: %s", m.chainID, len(ahead), len(m.fetchSources), b.String()) + } + + firstAhead := ahead[0] + for _, tn := range ahead { + if tn.t.Before(firstAhead.t) { + firstAhead = tn + } + } + + m.suspicion = true + m.suspicionTime = firstAhead.t + m.suspicionBlockNumber = firstAhead.n + + m.logger.Debugf("[%s] block censorship suspicion triggered, header time=%s, number=%d; last block time=%s, number=%d", + m.chainID, m.suspicionTime.Format(logTimeFormat), m.suspicionBlockNumber, lastBlockTime.Format(logTimeFormat), lastBlockNumber) + + return false +} + +// GetSuspicion returns the suspicion flag, and the header block number that is ahead. +// If suspicion==false, then suspicionBlockNumber==0. +// +// Used mainly for testing. +func (m *BFTCensorshipMonitor) GetSuspicion() (bool, uint64) { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.suspicion, m.suspicionBlockNumber +} + +func (m *BFTCensorshipMonitor) launchHeaderReceivers() error { + m.mutex.Lock() + defer m.mutex.Unlock() + + numEP := len(m.fetchSources) + if numEP <= 0 { + return errors.New("no endpoints") + } + + hRcvToCreate := make([]*orderers.Endpoint, 0) + now := time.Now() + for i, ep := range m.fetchSources { + if i == m.blockSourceIndex { + continue // skip the block source + } + + hRcvMon := m.hdrRcvTrackers[ep.Address] + // Create a header receiver to sources that + // - don't have a running receiver already, and + // - don't have a retry deadline in the future + if hRcvMon.headerReceiver != nil { + if !hRcvMon.headerReceiver.IsStopped() { + m.logger.Debugf("[%s] Header receiver to: %s, is running", m.chainID, ep.Address) + continue + } + + // When a receiver stops, we retry to restart. If there are repeated failures, we use an increasing + // retry interval. The retry interval is exponential in the number of failures in a certain time window, + // but is not greater that a maximum interval. + if hRcvMon.headerReceiver.IsStarted() && hRcvMon.headerReceiver.IsStopped() { + // Prune all failure times older than the stop history window. 
+ // If it is a new failure event, add a restart deadline in the future. + hRcvMon.pruneOlderThan(now.Add(-m.stopHistoryWindowDur)) + stopTime := hRcvMon.headerReceiver.GetErrorStopTime() + if added := hRcvMon.appendIfNewer(stopTime); added { + dur := backOffDuration(2.0, uint(hRcvMon.stopTimes.Len()-1), m.timeoutConfig.MinRetryInterval, m.timeoutConfig.MaxRetryInterval) + m.logger.Debugf("[%s] Header receiver to: %s, had stopped, (%s), retry in %s", m.chainID, ep.Address, stopTime.Format(logTimeFormat), dur) + hRcvMon.retryDeadline = now.Add(dur) + } + } + } + + if hRcvMon.retryDeadline.After(now) { + m.logger.Debugf("[%s] Headers receiver to: %s, has a retry deadline in the future, retry in %s", m.chainID, ep.Address, hRcvMon.retryDeadline.Sub(now)) + continue + } + + hRcvToCreate = append(hRcvToCreate, ep) + } + + m.logger.Debugf("[%s] Going to create %d header receivers: %+v", m.chainID, len(hRcvToCreate), hRcvToCreate) + + for _, ep := range hRcvToCreate { + hrRcvMon := m.hdrRcvTrackers[ep.Address] + // This may fail if the orderer is down or faulty. If it fails, we back off and retry. + // We count connection failure attempts (here) and stop failures separately. + headerClient, _, err := m.newHeaderClient(ep, hrRcvMon.headerReceiver) // TODO use the clientCloser function + if err != nil { + dur := backOffDuration(2.0, hrRcvMon.connectFailureCounter, m.timeoutConfig.MinRetryInterval, m.timeoutConfig.MaxRetryInterval) + hrRcvMon.retryDeadline = time.Now().Add(dur) + hrRcvMon.connectFailureCounter++ + m.logger.Debugf("[%s] Failed to create a header receiver to: %s, failure no. %d, will retry in %s", m.chainID, ep.Address, hrRcvMon.connectFailureCounter, dur) + continue + } + + hrRcvMon.headerReceiver = NewBFTHeaderReceiver(m.chainID, ep.Address, headerClient, m.headerVerifier, hrRcvMon.headerReceiver, flogging.MustGetLogger("BFTHeaderReceiver")) + hrRcvMon.connectFailureCounter = 0 + hrRcvMon.retryDeadline = time.Time{} + + m.logger.Debugf("[%s] Created a header receiver to: %s", m.chainID, ep.Address) + go hrRcvMon.headerReceiver.DeliverHeaders() + m.logger.Debugf("[%s] Launched a header receiver to: %s", m.chainID, ep.Address) + } + + m.logger.Debugf("Exit: number of endpoints: %d", numEP) + return nil +} + +// newHeaderClient connects to the orderer's delivery service and requests a stream of headers. +// Seek from the largest of the block progress and the last good header from the previous header receiver. 
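A compact restatement of that seek rule as a standalone helper, mirroring the logic of the method that follows; the function and parameter names here are illustrative only:

// seekStart picks the first header to request: one past the newer of the last
// fetched block and the last verified header, or 0 if nothing has been fetched yet.
func seekStart(lastBlock uint64, lastBlockTime time.Time, prevHeader uint64, havePrevHeader bool) uint64 {
    start := uint64(0)
    if !lastBlockTime.IsZero() {
        start = lastBlock + 1
    }
    if havePrevHeader && prevHeader+1 > start {
        start = prevHeader + 1
    }
    return start
}

For example, seekStart(7, now, 9, true) yields 10, while seekStart(0, time.Time{}, 0, false) yields 0, matching the behavior described above.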
+func (m *BFTCensorshipMonitor) newHeaderClient(endpoint *orderers.Endpoint, prevHeaderReceiver *BFTHeaderReceiver) (deliverClient orderer.AtomicBroadcast_DeliverClient, clientCloser func(), err error) { + blockNumber, blockTime := m.progressReporter.BlockProgress() + if !blockTime.IsZero() { + blockNumber++ // If blockTime.IsZero(), we request block number 0, else blockNumber+1 + } + + if prevHeaderReceiver != nil { + hNum, _, errH := prevHeaderReceiver.LastBlockNum() + if errH == nil && (hNum+1) > blockNumber { + blockNumber = hNum + 1 + } + } + + seekInfoEnv, err := m.requester.SeekInfoHeadersFrom(blockNumber) + if err != nil { + return nil, nil, errors.Wrap(err, "could not create a signed Deliver SeekInfo message, something is critically wrong") + } + + deliverClient, clientCloser, err = m.requester.Connect(seekInfoEnv, endpoint) + if err != nil { + return nil, nil, errors.Wrap(err, "could not connect to ordering service") + } + + return deliverClient, clientCloser, nil +} diff --git a/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory.go b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory.go new file mode 100644 index 00000000000..b8762587651 --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory.go @@ -0,0 +1,25 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider + +import "github.com/hyperledger/fabric/internal/pkg/peer/orderers" + +// BFTCensorshipMonitorFactory creates an instance of a BFTCensorshipMonitor. It is an implementation of the +// CensorshipDetectorFactory interface which abstracts the creation of a BFTCensorshipMonitor. +type BFTCensorshipMonitorFactory struct{} + +func (f *BFTCensorshipMonitorFactory) Create( + chainID string, + verifier BlockVerifier, + requester DeliverClientRequester, + progressReporter BlockProgressReporter, + fetchSources []*orderers.Endpoint, + blockSourceIndex int, + timeoutConf TimeoutConfig, +) CensorshipDetector { + return NewBFTCensorshipMonitor(chainID, verifier, requester, progressReporter, fetchSources, blockSourceIndex, timeoutConf) +} diff --git a/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory_test.go b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory_test.go new file mode 100644 index 00000000000..5f5a247e88d --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_factory_test.go @@ -0,0 +1,21 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider_test + +import ( + "testing" + + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" + "github.com/stretchr/testify/require" +) + +func TestNewBFTCensorshipMonitorFactory(t *testing.T) { + s := newMonitorTestSetup(t, 5) + f := &blocksprovider.BFTCensorshipMonitorFactory{} + mon := f.Create(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, 0, blocksprovider.TimeoutConfig{}) + require.NotNil(t, mon) +} diff --git a/internal/pkg/peer/blocksprovider/bft_censorship_monitor_test.go b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_test.go new file mode 100644 index 00000000000..5b08b2090eb --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_censorship_monitor_test.go @@ -0,0 +1,770 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider_test + +import ( + "fmt" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider/fake" + "github.com/hyperledger/fabric/internal/pkg/peer/orderers" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestNewBFTCensorshipMonitor_New(t *testing.T) { + flogging.ActivateSpec("debug") + + s := newMonitorTestSetup(t, 5) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, 0, blocksprovider.TimeoutConfig{}) + require.NotNil(t, mon) +} + +// Scenario: +// - start the monitor, with a single source +// - stop the monitor without reading from error channel +func TestBFTCensorshipMonitor_Stop(t *testing.T) { + flogging.ActivateSpec("debug") + + s := newMonitorTestSetup(t, 1) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, 0, blocksprovider.TimeoutConfig{}) + require.NotNil(t, mon) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + mon.Monitor() + wg.Done() + }() + + mon.Stop() + wg.Wait() +} + +// Scenario: +// - start the monitor, with a single source +// - stop the monitor, ensure error channel contains an error +func TestBFTCensorshipMonitor_StopWithErrors(t *testing.T) { + flogging.ActivateSpec("debug") + + s := newMonitorTestSetup(t, 1) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, 0, blocksprovider.TimeoutConfig{}) + require.NotNil(t, mon) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "received a stop signal") +} + +// Scenario: +// - start the monitor, with no sources +// - monitor exits, ensure error channel contains an error +func TestBFTCensorshipMonitor_NoSources(t *testing.T) { + flogging.ActivateSpec("debug") + + s := newMonitorTestSetup(t, 0) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, 0, blocksprovider.TimeoutConfig{}) + require.NotNil(t, mon) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "no endpoints") +} + +// Scenario: +// - start the monitor, with 4 sources +// - the monitor connects to all but the block source +// - no blocks, no headers, block progress is called repeatedly, returns zero value +// - headers are seeked from 0 +func TestBFTCensorshipMonitor_NoHeadersNoBlocks(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 4 + blockSource := 1 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 1 * time.Millisecond, + MaxRetryInterval: 20 * time.Millisecond, + BlockCensorshipTimeout: time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, 
tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + + return client, func() {}, nil + }) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return s.fakeRequester.SeekInfoHeadersFromCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeRequester.ConnectCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() >= 9 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + require.Equal(t, uint64(0), n) + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep) + require.Equal(t, fakeEnv, env) + } + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "received a stop signal") +} + +// Scenario: +// - start the monitor, with 4 sources +// - the monitor connects to all but the block source +// - block progress returns {7, now} +// - one header returns {8} +// - suspicion raised, after timeout, censorship detected +func TestBFTCensorshipMonitor_CensorshipDetected(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 4 + blockSource := 0 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 1 * time.Millisecond, + MaxRetryInterval: 20 * time.Millisecond, + BlockCensorshipTimeout: time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + + return client, func() {}, nil + }) + b7time := time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(7), b7time) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return 
s.fakeRequester.SeekInfoHeadersFromCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeRequester.ConnectCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() > 9 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + require.Equal(t, uint64(8), n, "should seek from block 8") + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep, "should not connect to block source") + require.Equal(t, fakeEnv, env, "should connect with expected envelope") + } + + // one header is ahead + s.sourceStream[1] <- makeDeliverResponseBlock(8) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == uint64(8) + }, + 5*time.Second, 1*time.Millisecond, "suspicion should be raised on block number 8") + + wg.Wait() + require.EqualError(t, err, "block censorship detected, endpoint: Address: orderer-address-0, CertHash: 08D6C05A21512A79A1DFEB9D2A8F262F") +} + +// Scenario: +// - start the monitor, with 4 sources +// - the monitor connects to all but the block source +// - block progress returns {7, now} +// - header receivers return {8, 9, 10} +// - suspicion raised, block advances to 8, then 9, +// - after timeout, censorship detected on 10 +func TestBFTCensorshipMonitor_SuspicionsRemovedCensorshipDetected(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 4 + blockSource := 0 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 1 * time.Millisecond, + MaxRetryInterval: 20 * time.Millisecond, + BlockCensorshipTimeout: time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + + return client, func() {}, nil + }) + blockTime := time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(7), blockTime) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return s.fakeRequester.SeekInfoHeadersFromCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeRequester.ConnectCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() > 9 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + 
require.Equal(t, uint64(8), n, "should seek from block 8") + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep, "should not connect to block source") + require.Equal(t, fakeEnv, env, "should connect with expected envelope") + } + + // 3 headers are ahead + s.sourceStream[1] <- makeDeliverResponseBlock(8) + time.Sleep(5 * time.Millisecond) + s.sourceStream[2] <- makeDeliverResponseBlock(9) + time.Sleep(5 * time.Millisecond) + s.sourceStream[3] <- makeDeliverResponseBlock(10) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == uint64(8) + }, + 5*time.Second, 1*time.Millisecond, "suspicion should be raised on block number 8") + + blockTime = time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(8), blockTime) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == uint64(9) + }, + 5*time.Second, 1*time.Millisecond, "suspicion should be raised on block number 9") + + blockTime = time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(9), blockTime) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == uint64(10) + }, + 5*time.Second, 1*time.Millisecond, "suspicion should be raised on block number 10") + + wg.Wait() + require.EqualError(t, err, "block censorship detected, endpoint: Address: orderer-address-0, CertHash: 08D6C05A21512A79A1DFEB9D2A8F262F") +} + +// Scenario: +// - start the monitor, with 4 sources +// - the monitor connects to all but the block source +// - block progress returns {n, now} +// - one header returns {n+1}, suspicion raised, +// - before timeout, new block (n+1) arrives, suspicion removed +// - repeat the above x7, each time the header is coming from a different source +func TestBFTCensorshipMonitor_SuspicionRemoved(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 4 + blockSource := 0 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 1 * time.Millisecond, + MaxRetryInterval: 20 * time.Millisecond, + BlockCensorshipTimeout: 5 * time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + + return client, func() {}, nil + }) + blockTime := time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(7), blockTime) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return s.fakeRequester.SeekInfoHeadersFromCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return 
s.fakeRequester.ConnectCallCount() == 3 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() > 9 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + require.Equal(t, uint64(8), n, "should seek from block 8") + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep, "should not connect to block source") + require.Equal(t, fakeEnv, env, "should connect with expected envelope") + } + + for n := uint64(8); n < uint64(15); n++ { + // one header is ahead, coming from a different source every time + headerSourceIndex := n%3 + 1 + s.sourceStream[headerSourceIndex] <- makeDeliverResponseBlock(n) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == n + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be raised") + + blockTime = time.Now() + s.fakeProgressReporter.BlockProgressReturns(n, blockTime) + + require.Eventually(t, + func() bool { + susp, _ := mon.GetSuspicion() + return !susp + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be removed") + } + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "received a stop signal") +} + +// Scenario: +// - start the monitor, with 7 sources +// - the monitor tries to connect to all but the block source (index=0), +// - one orderer is faulty (index=1), Recv returns errors +// - one orderer is down (index=2), cannot connect +// +// - block progress returns {n, now} +// - one header returns {n+1}, suspicion raised, +// - before timeout, new block (n+1) arrives, suspicion removed +// - repeat the above x7, each time the header is coming from a different source +func TestBFTCensorshipMonitor_FaultySourceIgnored(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 7 + blockSource := 0 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 5 * time.Millisecond, + MaxRetryInterval: 20 * time.Millisecond, + BlockCensorshipTimeout: 5 * time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + switch index { + case 1: + client.RecvReturns(nil, errors.New("test: faulty source")) + client.CloseSendCalls(func() error { + return nil + }) + case 2: + return nil, nil, errors.New("test: cannot connect") + default: + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + } + + return client, func() {}, nil + }) + blockTime := time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(7), blockTime) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = 
<-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return s.fakeRequester.SeekInfoHeadersFromCallCount() >= 6 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeRequester.ConnectCallCount() >= 6 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() >= 12 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + require.Equal(t, uint64(8), n, "should seek from block 8") + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep, "should not connect to block source") + require.Equal(t, fakeEnv, env, "should connect with expected envelope") + } + + for n := uint64(8); n < uint64(15); n++ { + // one header is ahead, coming from a different honest source every time + headerSourceIndex := n%4 + 3 + s.sourceStream[headerSourceIndex] <- makeDeliverResponseBlock(n) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == n + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be raised") + + blockTime = time.Now() + s.fakeProgressReporter.BlockProgressReturns(n, blockTime) + + require.Eventually(t, + func() bool { + susp, _ := mon.GetSuspicion() + return !susp + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be removed") + } + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "received a stop signal") +} + +// Scenario: +// - start the monitor, with 4 sources +// - the monitor tries to connect to all but the block source (index=0), +// - for the first 10 calls: +// - one orderer is faulty (index=1), Recv returns errors +// - one orderer is down (index=2), cannot connect +// - then these sources recover +// +// - one orderer remains down (index=3) +// +// - block progress returns {n, now} +// - the recovered sources returns {n+1}, suspicion raised, +// - before timeout, new block (n+1) arrives, suspicion removed +// - repeat the above x7, each time the header is coming from a different recovered source +func TestBFTCensorshipMonitor_FaultySourceRecovery(t *testing.T) { + flogging.ActivateSpec("debug") + + numOrderers := 4 + blockSource := 0 + tConfig := blocksprovider.TimeoutConfig{ + MinRetryInterval: 5 * time.Millisecond, + MaxRetryInterval: 80 * time.Millisecond, + BlockCensorshipTimeout: 1 * time.Second, + } + s := newMonitorTestSetup(t, numOrderers) + mon := blocksprovider.NewBFTCensorshipMonitor(s.channelID, s.fakeBlockVerifier, s.fakeRequester, s.fakeProgressReporter, s.sources, blockSource, tConfig) + require.NotNil(t, mon) + + fakeEnv := &common.Envelope{Payload: []byte("bogus"), Signature: []byte("bogus")} + s.fakeRequester.SeekInfoHeadersFromReturns(fakeEnv, nil) + // Connect returns a client that blocks on Recv() + callNum1 := 0 + callNum2 := 0 + s.fakeRequester.ConnectCalls( + func(envelope *common.Envelope, endpoint *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + index := address2index(endpoint.Address) + client := &fake.DeliverClient{} + + switch index { + case 1: + callNum1++ + if callNum1 <= 10 { + client.RecvReturns(nil, errors.New("test: faulty source")) + client.CloseSendCalls(func() error { + return nil + }) + return client, func() {}, nil + } + case 2: + callNum2++ + if callNum2 <= 10 { + return nil, nil, errors.New("test: cannot connect") + } + case 3: + return nil, nil, errors.New("test: 
cannot connect") + } + + client.RecvCalls(func() (*orderer.DeliverResponse, error) { + resp := <-s.sourceStream[index] + if resp == nil { + return nil, errors.New("test-closing") + } + return resp, nil + }) + client.CloseSendCalls(func() error { + close(s.sourceStream[index]) + return nil + }) + + return client, func() {}, nil + }) + blockTime := time.Now() + s.fakeProgressReporter.BlockProgressReturns(uint64(7), blockTime) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + mon.Monitor() + wg.Done() + }() + + var err error + go func() { + err = <-mon.ErrorsChannel() + wg.Done() + }() + + require.Eventually(t, func() bool { return s.fakeRequester.SeekInfoHeadersFromCallCount() >= 30 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeRequester.ConnectCallCount() >= 30 }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return s.fakeProgressReporter.BlockProgressCallCount() >= 60 }, 5*time.Second, 10*time.Millisecond) + for i := 0; i < s.fakeRequester.ConnectCallCount(); i++ { + n := s.fakeRequester.SeekInfoHeadersFromArgsForCall(i) + require.Equal(t, uint64(8), n, "should seek from block 8") + env, ep := s.fakeRequester.ConnectArgsForCall(i) + require.NotEqual(t, s.sources[s.sourceIndex].Address, ep, "should not connect to block source") + require.Equal(t, fakeEnv, env, "should connect with expected envelope") + } + + for n := uint64(8); n < uint64(15); n++ { + // one header is ahead, coming from a different recovered source (1,2) every time + headerSourceIndex := n%2 + 1 + s.sourceStream[headerSourceIndex] <- makeDeliverResponseBlock(n) + + require.Eventually(t, + func() bool { + susp, num := mon.GetSuspicion() + return susp && num == n + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be raised") + + blockTime = time.Now() + s.fakeProgressReporter.BlockProgressReturns(n, blockTime) + + require.Eventually(t, + func() bool { + susp, _ := mon.GetSuspicion() + return !susp + }, + 5*time.Second, 10*time.Millisecond, "suspicion should be removed") + } + + mon.Stop() + wg.Wait() + require.EqualError(t, err, "received a stop signal") +} + +type monitorTestSetup struct { + channelID string + fakeBlockVerifier *fake.BlockVerifier + fakeProgressReporter *fake.BlockProgressReporter + fakeRequester *fake.DeliverClientRequester + sources []*orderers.Endpoint + sourceIndex int + sourceStream []chan *orderer.DeliverResponse +} + +func newMonitorTestSetup(t *testing.T, numSources int) *monitorTestSetup { + s := &monitorTestSetup{ + channelID: "testchannel", + fakeBlockVerifier: &fake.BlockVerifier{}, + fakeProgressReporter: &fake.BlockProgressReporter{}, + fakeRequester: &fake.DeliverClientRequester{}, + sources: nil, + sourceIndex: 0, + } + + for i := 0; i < numSources; i++ { + s.sources = append(s.sources, &orderers.Endpoint{ + Address: fmt.Sprintf("orderer-address-%d", i), + RootCerts: [][]byte{{1, 2, 3, 4}}, + Refreshed: make(chan struct{}), + }) + s.sourceStream = append(s.sourceStream, make(chan *orderer.DeliverResponse)) + } + + return s +} + +func address2index(addr string) int { + tokens := strings.Split(addr, "-") + i, _ := strconv.Atoi(tokens[2]) + return i +} + +func makeDeliverResponseBlock(n uint64) *orderer.DeliverResponse { + return &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{ + Header: &common.BlockHeader{ + Number: n, + }, + }, + }, + } +} diff --git a/internal/pkg/peer/blocksprovider/bft_deliverer.go b/internal/pkg/peer/blocksprovider/bft_deliverer.go index 
cb10b42be5c..8b82eee61c1 100644 --- a/internal/pkg/peer/blocksprovider/bft_deliverer.go +++ b/internal/pkg/peer/blocksprovider/bft_deliverer.go @@ -18,23 +18,57 @@ import ( "github.com/pkg/errors" ) -// BFTDeliverer TODO this is a skeleton +//go:generate counterfeiter -o fake/censorship_detector.go --fake-name CensorshipDetector . CensorshipDetector +type CensorshipDetector interface { + Monitor() + Stop() + ErrorsChannel() <-chan error +} + +//go:generate counterfeiter -o fake/censorship_detector_factory.go --fake-name CensorshipDetectorFactory . CensorshipDetectorFactory +type CensorshipDetectorFactory interface { + Create( + chainID string, + verifier BlockVerifier, + requester DeliverClientRequester, + progressReporter BlockProgressReporter, + fetchSources []*orderers.Endpoint, + blockSourceIndex int, + timeoutConf TimeoutConfig, + ) CensorshipDetector +} + +//go:generate counterfeiter -o fake/duration_exceeded_handler.go --fake-name DurationExceededHandler . DurationExceededHandler +type DurationExceededHandler interface { + DurationExceededHandler() (stopRetries bool) +} + +// BFTDeliverer fetches blocks using a block receiver and maintains a BFTCensorshipMonitor. +// It maintains a shuffled orderer source slice, and will cycle through it trying to find a "good" orderer to fetch +// blocks from. After it selects an orderer to fetch blocks from, it assigns all the rest of the orderers to the +// censorship monitor. The censorship monitor will request block attestations (header+sigs) from said orderers, and +// will monitor their progress relative to the block fetcher. If a censorship suspicion is detected, the BFTDeliverer +// will try to find another orderer to fetch from. type BFTDeliverer struct { // TODO - ChannelID string - BlockHandler BlockHandler - Ledger LedgerInfo - BlockVerifier BlockVerifier - Dialer Dialer - Orderers OrdererConnectionSource - DoneC chan struct{} - Signer identity.SignerSerializer - DeliverStreamer DeliverStreamer - Logger *flogging.FabricLogger + ChannelID string + BlockHandler BlockHandler + Ledger LedgerInfo + BlockVerifier BlockVerifier + Dialer Dialer + Orderers OrdererConnectionSource + DoneC chan struct{} + Signer identity.SignerSerializer + DeliverStreamer DeliverStreamer + CensorshipDetectorFactory CensorshipDetectorFactory + Logger *flogging.FabricLogger - // The maximal value of the actual retry interval, which cannot increase beyond this value - MaxRetryInterval time.Duration // The initial value of the actual retry interval, which is increased on every failed retry InitialRetryInterval time.Duration + // The maximal value of the actual retry interval, which cannot increase beyond this value + MaxRetryInterval time.Duration + // If a certain header from a header receiver is in front of the block receiver for more that this time, a + // censorship event is declared and the block source is changed. 
+ BlockCensorshipTimeout time.Duration // After this duration, the MaxRetryDurationExceededHandler is called to decide whether to keep trying MaxRetryDuration time.Duration // This function is called after MaxRetryDuration of failed retries to decide whether to keep trying @@ -47,22 +81,20 @@ type BFTDeliverer struct { // TODO requester *DeliveryRequester - mutex sync.Mutex // mutex protects the following fields - stopFlag bool // mark the Deliverer as stopped - nextBlockNumber uint64 // next block number - lastBlockTime time.Time // last block time - lastBlockSourceIndex int // the source index of the last block we got, or -1 - fetchFailureCounter int // counts the number of consecutive failures to fetch a block + mutex sync.Mutex // mutex protects the following fields + stopFlag bool // mark the Deliverer as stopped + nextBlockNumber uint64 // next block number + lastBlockTime time.Time // last block time + lastBlockSourceIndex int // the source index of the last block we got, or -1 + fetchFailureCounter int // counts the number of consecutive failures to fetch a block + fetchFailureTotalSleepDuration time.Duration // the cumulative sleep time from when fetchFailureCounter goes 0->1 fetchSources []*orderers.Endpoint fetchSourceIndex int fetchErrorsC chan error - blockReceiver *BlockReceiver - - // TODO here we'll have a CensorshipMonitor component that detects block censorship. - // When it suspects censorship, it will emit an error to this channel. - monitorErrorsC chan error + blockReceiver *BlockReceiver + censorshipMonitor CensorshipDetector } func (d *BFTDeliverer) Initialize() { @@ -75,6 +107,17 @@ func (d *BFTDeliverer) Initialize() { ) } +func (d *BFTDeliverer) BlockProgress() (uint64, time.Time) { + d.mutex.Lock() + defer d.mutex.Unlock() + + if d.nextBlockNumber == 0 { + return 0, time.Time{} + } + + return d.nextBlockNumber - 1, d.lastBlockTime +} + func (d *BFTDeliverer) DeliverBlocks() { var err error @@ -85,76 +128,155 @@ func (d *BFTDeliverer) DeliverBlocks() { d.Logger.Error("Did not return ledger height, something is critically wrong", err) return } - d.Logger.Infof("Starting DeliverBlocks on channel `%s`, block height=%d", d.ChannelID, d.nextBlockNumber) - // select an initial source randomly + d.Logger.Infof("Starting to DeliverBlocks on channel `%s`, block height=%d", d.ChannelID, d.nextBlockNumber) + defer func() { + d.Logger.Infof("Stopping to DeliverBlocks on channel `%s`, block height=%d", d.ChannelID, d.nextBlockNumber) + }() + + timeoutConfig := TimeoutConfig{ + MinRetryInterval: d.InitialRetryInterval, + MaxRetryInterval: d.MaxRetryInterval, + BlockCensorshipTimeout: d.BlockCensorshipTimeout, + } + + // Refresh and randomize the sources, selects a random initial source, and incurs a random iteration order. d.refreshSources() -FetchAndMonitorLoop: for { + // Compute the backoff duration and wait before retrying. // The backoff duration is doubled with every failed round. // A failed round is when we had moved through all the endpoints without success. // If we get a block successfully from a source, or endpoints are refreshed, the failure count is reset. 
- if count := d.getFetchFailureCounter(); count > 0 { - rounds := uint(count) - if l := len(d.fetchSources); l > 0 { - rounds = uint(count / len(d.fetchSources)) - } - - dur := backOffDuration(2.0, rounds, bftMinBackoffDelay, bftMaxBackoffDelay) - backOffSleep(dur, d.DoneC) + if stopLoop := d.retryBackoff(); stopLoop { + break } - // assign other endpoints to the monitor + // No endpoints is a non-recoverable error, as new endpoints are a result of fetching new blocks from an orderer. + if len(d.fetchSources) == 0 { + d.Logger.Error("Failure in DeliverBlocks, no orderer endpoints, something is critically wrong") + break + } - // start a block fetcher and a monitor - // a buffered channel so that the fetcher goroutine can send an error and exit w/o waiting for it to be consumed. + // Start a block fetcher; a buffered channel so that the fetcher goroutine can send an error and exit w/o + // waiting for it to be consumed. A block receiver is created within. d.fetchErrorsC = make(chan error, 1) source := d.fetchSources[d.fetchSourceIndex] go d.FetchBlocks(source) - // TODO start a censorship monitor + // Create and start a censorship monitor. + d.censorshipMonitor = d.CensorshipDetectorFactory.Create( + d.ChannelID, d.BlockVerifier, d.requester, d, d.fetchSources, d.fetchSourceIndex, timeoutConfig) + go d.censorshipMonitor.Monitor() - // wait for fetch errors, censorship suspicions, or a stop signal. - select { - case <-d.DoneC: - break FetchAndMonitorLoop - case errFetch := <-d.fetchErrorsC: - if errFetch != nil { - switch errFetch.(type) { - case *errStopping: - // nothing to do - case *errRefreshEndpoint: - // get new endpoints and reassign fetcher and monitor - d.refreshSources() - d.resetFetchFailureCounter() - default: - d.fetchSourceIndex = (d.fetchSourceIndex + 1) % len(d.fetchSources) - d.incFetchFailureCounter() - } - // TODO can it be nil? - } - case errMonitor := <-d.monitorErrorsC: - // TODO until we implement the censorship monitor this nil channel is blocked - if errMonitor != nil { - d.Logger.Warningf("Censorship suspicion: %s", err) - // TODO close the block receiver, increment the index - // TODO - } - // TODO can it be nil? + // Wait for block fetcher & censorship monitor events, or a stop signal. + // Events which cause a retry return nil, non-recoverable errors return an error. + if stopLoop := d.handleFetchAndCensorshipEvents(); stopLoop { + break } + + d.censorshipMonitor.Stop() } - // clean up everything because we are closing + // Clean up everything because we are closing d.mutex.Lock() defer d.mutex.Unlock() d.blockReceiver.Stop() - // TODO stop the monitor + if d.censorshipMonitor != nil { + d.censorshipMonitor.Stop() + } +} + +// retryBackoff computes the backoff duration and wait before retrying. +// The backoff duration is doubled with every failed round. +// A failed round is when we had moved through all the endpoints without success. +// If we get a block successfully from a source, or endpoints are refreshed, the failure count is reset. 
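A worked example of that backoff, with hypothetical settings (InitialRetryInterval = 100ms, MaxRetryInterval = 10s, 4 endpoints) and assuming backOffDuration(base, exp, min, max) caps min·base^exp at max, as its use here suggests:

// 9 consecutive fetch failures across 4 endpoints => 2 completed rounds.
failures, endpoints := 9, 4
rounds := uint(failures / endpoints)       // 2
dur := (100 * time.Millisecond) << rounds  // 100ms * 2^2 = 400ms
if dur > 10*time.Second {
    dur = 10 * time.Second // never exceeds MaxRetryInterval
}
// The deliverer sleeps ~400ms, adds it to fetchFailureTotalSleepDuration, and only
// consults MaxRetryDurationExceededHandler once that cumulative sleep passes MaxRetryDuration.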
+func (d *BFTDeliverer) retryBackoff() (stop bool) { + failureCounter, failureTotalSleepDuration := d.getFetchFailureStats() + if failureCounter > 0 { + rounds := uint(failureCounter) + if l := len(d.fetchSources); l > 0 { + rounds = uint(failureCounter / l) + } + + if failureTotalSleepDuration > d.MaxRetryDuration { + if d.MaxRetryDurationExceededHandler() { + d.Logger.Warningf("Attempted to retry block delivery for more than MaxRetryDuration (%s), giving up", d.MaxRetryDuration) + return true + } + d.Logger.Debugf("Attempted to retry block delivery for more than MaxRetryDuration (%s), but handler decided to continue retrying", d.MaxRetryDuration) + } + + dur := backOffDuration(2.0, rounds, d.InitialRetryInterval, d.MaxRetryInterval) + d.Logger.Warningf("Failed to fetch blocks, count=%d, round=%d, going to retry in %s", failureCounter, rounds, dur) + d.sleeper.Sleep(dur, d.DoneC) + d.addFetchFailureSleepDuration(dur) + } + + return false +} + +// handleFetchAndCensorshipEvents waits for events from three channels - for fetch, censorship, and done events. +// If the event is recoverable, false is returned and the main loop will switch to another block source, while +// reassigning the header receivers. If the events are non-recoverable or the stop signal, true is returned. +func (d *BFTDeliverer) handleFetchAndCensorshipEvents() (stopLoop bool) { + d.Logger.Debug("Entry") + + select { + case <-d.DoneC: + d.Logger.Debug("Received the stop signal") + return true + + case errFetch := <-d.fetchErrorsC: + d.Logger.Debugf("Error received from fetchErrorsC channel: %s", errFetch) + + switch errFetch.(type) { + case *ErrStopping: + d.Logger.Debug("FetchBlocks received the stop signal") + return true + case *errRefreshEndpoint: + d.Logger.Info("Refreshed endpoints, going to reassign block fetcher and censorship monitor, and reconnect to ordering service") + d.refreshSources() + d.resetFetchFailureCounter() + return false + case *ErrFatal: + d.Logger.Errorf("Failure in FetchBlocks, something is critically wrong: %s", errFetch) + return true + default: + d.Logger.Debug("FetchBlocks produced an error, going to retry") + d.fetchSourceIndex = (d.fetchSourceIndex + 1) % len(d.fetchSources) + d.incFetchFailureCounter() + return false + } + + case errMonitor := <-d.censorshipMonitor.ErrorsChannel(): + d.Logger.Debugf("Error received from censorshipMonitor.ErrorsChannel: %s", errMonitor) + + switch errMonitor.(type) { + case *ErrStopping: + d.Logger.Debug("CensorshipMonitor received the stop signal") + return true + case *ErrCensorship: + d.Logger.Warningf("Censorship suspicion: %s; going to retry fetching blocks from another orderer", errMonitor) + d.mutex.Lock() + d.blockReceiver.Stop() + d.mutex.Unlock() + d.fetchSourceIndex = (d.fetchSourceIndex + 1) % len(d.fetchSources) + d.incFetchFailureCounter() + return false + case *ErrFatal: + d.Logger.Errorf("Failure in CensorshipMonitor, something is critically wrong: %s", errMonitor) + return true + default: + d.Logger.Errorf("Unexpected error from CensorshipMonitor, something is critically wrong: %s", errMonitor) + return true + } + } } func (d *BFTDeliverer) refreshSources() { // select an initial source randomly - d.fetchSources = shuffle(d.Orderers.Endpoints()) + d.fetchSources = d.Orderers.ShuffledEndpoints() d.Logger.Infof("Refreshed endpoints: %s", d.fetchSources) d.fetchSourceIndex = 0 } @@ -179,7 +301,7 @@ func (d *BFTDeliverer) FetchBlocks(source *orderers.Endpoint) { for { select { case <-d.DoneC: - d.fetchErrorsC <- &errStopping{message: 
"stopping"} + d.fetchErrorsC <- &ErrStopping{Message: "stopping"} return default: } @@ -187,7 +309,7 @@ func (d *BFTDeliverer) FetchBlocks(source *orderers.Endpoint) { seekInfoEnv, err := d.requester.SeekInfoBlocksFrom(d.getNextBlockNumber()) if err != nil { d.Logger.Errorf("Could not create a signed Deliver SeekInfo message, something is critically wrong: %s", err) - d.fetchErrorsC <- &errFatal{message: fmt.Sprintf("could not create a signed Deliver SeekInfo message: %s", err)} + d.fetchErrorsC <- &ErrFatal{Message: fmt.Sprintf("could not create a signed Deliver SeekInfo message: %s", err)} return } @@ -207,29 +329,46 @@ func (d *BFTDeliverer) FetchBlocks(source *orderers.Endpoint) { recvC: make(chan *orderer.DeliverResponse), stopC: make(chan struct{}), endpoint: source, - logger: d.Logger.With("orderer-address", source.Address), + logger: flogging.MustGetLogger("BlockReceiver").With("orderer-address", source.Address), } d.mutex.Lock() d.blockReceiver = blockRcv d.mutex.Unlock() + // Starts a goroutine that receives blocks from the stream client and places them in the `recvC` channel blockRcv.Start() - if err := blockRcv.ProcessIncoming(d.onBlockProcessingSuccess); err != nil { - d.Logger.Warningf("failure while processing incoming blocks: %s", err) - d.fetchErrorsC <- errors.Wrapf(err, "failure while processing incoming blocks, orderer-address: %s", source.Address) + // Consume blocks fom the `recvC` channel + if errProc := blockRcv.ProcessIncoming(d.onBlockProcessingSuccess); errProc != nil { + switch errProc.(type) { + case *ErrStopping: + // nothing to do + d.Logger.Debugf("BlockReceiver stopped while processing incoming blocks: %s", errProc) + case *errRefreshEndpoint: + d.Logger.Infof("Endpoint refreshed while processing incoming blocks: %s", errProc) + d.fetchErrorsC <- errProc + default: + d.Logger.Warningf("Failure while processing incoming blocks: %s", errProc) + d.fetchErrorsC <- errProc + } + return } } } func (d *BFTDeliverer) onBlockProcessingSuccess(blockNum uint64) { + d.Logger.Debugf("blockNum: %d", blockNum) + d.mutex.Lock() + defer d.mutex.Unlock() + d.fetchFailureCounter = 0 + d.fetchFailureTotalSleepDuration = 0 + d.nextBlockNumber = blockNum + 1 d.lastBlockTime = time.Now() - d.mutex.Unlock() } func (d *BFTDeliverer) resetFetchFailureCounter() { @@ -237,13 +376,14 @@ func (d *BFTDeliverer) resetFetchFailureCounter() { defer d.mutex.Unlock() d.fetchFailureCounter = 0 + d.fetchFailureTotalSleepDuration = 0 } -func (d *BFTDeliverer) getFetchFailureCounter() int { +func (d *BFTDeliverer) getFetchFailureStats() (int, time.Duration) { d.mutex.Lock() defer d.mutex.Unlock() - return d.fetchFailureCounter + return d.fetchFailureCounter, d.fetchFailureTotalSleepDuration } func (d *BFTDeliverer) incFetchFailureCounter() { @@ -253,6 +393,13 @@ func (d *BFTDeliverer) incFetchFailureCounter() { d.fetchFailureCounter++ } +func (d *BFTDeliverer) addFetchFailureSleepDuration(dur time.Duration) { + d.mutex.Lock() + defer d.mutex.Unlock() + + d.fetchFailureTotalSleepDuration += dur +} + func (d *BFTDeliverer) getNextBlockNumber() uint64 { d.mutex.Lock() defer d.mutex.Unlock() diff --git a/internal/pkg/peer/blocksprovider/bft_deliverer_test.go b/internal/pkg/peer/blocksprovider/bft_deliverer_test.go index 90a04090f11..770a20ad35f 100644 --- a/internal/pkg/peer/blocksprovider/bft_deliverer_test.go +++ b/internal/pkg/peer/blocksprovider/bft_deliverer_test.go @@ -7,31 +7,35 @@ SPDX-License-Identifier: Apache-2.0 package blocksprovider_test import ( + "bytes" "fmt" + "math" "sync" 
"testing" "time" + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/orderer" "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider/fake" "github.com/hyperledger/fabric/internal/pkg/peer/orderers" + "github.com/hyperledger/fabric/protoutil" . "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" ) type bftDelivererTestSetup struct { - withT *WithT - d *blocksprovider.BFTDeliverer - - mutex sync.Mutex // protects the following fields - ccs []*grpc.ClientConn + gWithT *WithT + d *blocksprovider.BFTDeliverer fakeDialer *fake.Dialer - fakeGossipServiceAdapter *fake.GossipServiceAdapter fakeBlockHandler *fake.BlockHandler fakeOrdererConnectionSource *fake.OrdererConnectionSource fakeLedgerInfo *fake.LedgerInfo @@ -39,17 +43,28 @@ type bftDelivererTestSetup struct { fakeSigner *fake.Signer fakeDeliverStreamer *fake.DeliverStreamer fakeDeliverClient *fake.DeliverClient + fakeCensorshipMonFactory *fake.CensorshipDetectorFactory + fakeCensorshipMon *fake.CensorshipDetector fakeSleeper *fake.Sleeper - doneC chan struct{} - recvStep chan struct{} - endC chan struct{} + fakeDurationExceededHandler *fake.DurationExceededHandler + + deliverClientDoneC chan struct{} // signals the deliverClient to exit + recvStepC chan *orderer.DeliverResponse + endC chan struct{} + + mutex sync.Mutex // protects the following fields + clientConnSet []*grpc.ClientConn // client connection set + monitorSet []*fake.CensorshipDetector // monitor set + monEndCSet []chan struct{} // monitor end set + monErrC chan error // the monitor errors channel, where it emits (fake) censorship events + monDoneC chan struct{} // signal the monitor to stop + monEndC chan struct{} // when the monitor stops, it closes this channel } func newBFTDelivererTestSetup(t *testing.T) *bftDelivererTestSetup { s := &bftDelivererTestSetup{ - withT: NewWithT(t), + gWithT: NewWithT(t), fakeDialer: &fake.Dialer{}, - fakeGossipServiceAdapter: &fake.GossipServiceAdapter{}, fakeBlockHandler: &fake.BlockHandler{}, fakeOrdererConnectionSource: &fake.OrdererConnectionSource{}, fakeLedgerInfo: &fake.LedgerInfo{}, @@ -57,23 +72,27 @@ func newBFTDelivererTestSetup(t *testing.T) *bftDelivererTestSetup { fakeSigner: &fake.Signer{}, fakeDeliverStreamer: &fake.DeliverStreamer{}, fakeDeliverClient: &fake.DeliverClient{}, + fakeCensorshipMonFactory: &fake.CensorshipDetectorFactory{}, fakeSleeper: &fake.Sleeper{}, - doneC: make(chan struct{}), - recvStep: make(chan struct{}), + fakeDurationExceededHandler: &fake.DurationExceededHandler{}, + deliverClientDoneC: make(chan struct{}), + recvStepC: make(chan *orderer.DeliverResponse), endC: make(chan struct{}), } return s } -func (s *bftDelivererTestSetup) beforeEach() { +func (s *bftDelivererTestSetup) initialize(t *testing.T) { s.fakeDialer.DialStub = func(string, [][]byte) (*grpc.ClientConn, error) { s.mutex.Lock() defer s.mutex.Unlock() + cc, err := grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) - s.ccs = append(s.ccs, cc) - s.withT.Expect(err).NotTo(HaveOccurred()) - s.withT.Expect(cc.GetState()).NotTo(Equal(connectivity.Shutdown)) + s.clientConnSet = append(s.clientConnSet, cc) + require.NoError(t, err) + 
require.NotEqual(t, connectivity.Shutdown, cc.GetState()) + return cc, nil } @@ -81,63 +100,120 @@ func (s *bftDelivererTestSetup) beforeEach() { s.fakeOrdererConnectionSource.RandomEndpointReturns(&orderers.Endpoint{ Address: "orderer-address-1", }, nil) - s.fakeOrdererConnectionSource.EndpointsReturns( - []*orderers.Endpoint{ - { - Address: "orderer-address-1", - RootCerts: nil, - Refreshed: make(chan struct{}), - }, - { - Address: "orderer-address-2", - RootCerts: nil, - Refreshed: make(chan struct{}), - }, - { - Address: "orderer-address-3", - RootCerts: nil, - Refreshed: make(chan struct{}), - }, - { - Address: "orderer-address-4", - RootCerts: nil, - Refreshed: make(chan struct{}), - }, - }) + sources := []*orderers.Endpoint{ + { + Address: "orderer-address-1", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-2", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-3", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-4", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + } + s.fakeOrdererConnectionSource.ShuffledEndpointsReturns(sources) + + s.fakeSigner.SignReturns([]byte("good-sig"), nil) + s.fakeDeliverClient.RecvStub = func() (*orderer.DeliverResponse, error) { select { - case <-s.recvStep: - return nil, fmt.Errorf("fake-recv-step-error") - case <-s.doneC: + case r := <-s.recvStepC: + if r == nil { + return nil, fmt.Errorf("fake-recv-step-error") + } + return r, nil + case <-s.deliverClientDoneC: return nil, nil } } s.fakeDeliverClient.CloseSendStub = func() error { select { - case s.recvStep <- struct{}{}: - case <-s.doneC: + case s.recvStepC <- nil: + case <-s.deliverClientDoneC: } + return nil } s.fakeDeliverStreamer.DeliverReturns(s.fakeDeliverClient, nil) + // Censorship monitor creation. + // The monitor can be created multiple times during a test. + // The monitor allows to send error events to the BFTDeliverer, be stopped, and block the monitor goroutine. 
+ s.fakeCensorshipMonFactory.CreateCalls( + func( + chID string, + verifier blocksprovider.BlockVerifier, + requester blocksprovider.DeliverClientRequester, + reporter blocksprovider.BlockProgressReporter, + endpoints []*orderers.Endpoint, + index int, + config blocksprovider.TimeoutConfig, + ) blocksprovider.CensorshipDetector { + monErrC := make(chan error, 1) + monDoneC := make(chan struct{}) + monEndC := make(chan struct{}) + + mon := &fake.CensorshipDetector{} + mon.ErrorsChannelCalls(func() <-chan error { + return monErrC + }) + mon.MonitorCalls(func() { + <-monDoneC + close(monEndC) + }, + ) + mon.StopCalls(func() { + select { + case <-monDoneC: + default: + close(monDoneC) + } + }) + + s.mutex.Lock() + defer s.mutex.Unlock() + + s.fakeCensorshipMon = mon + s.monitorSet = append(s.monitorSet, s.fakeCensorshipMon) + s.monEndCSet = append(s.monEndCSet, monEndC) + s.monErrC = monErrC + s.monDoneC = monDoneC + s.monEndC = monEndC + + return mon + }) + s.d = &blocksprovider.BFTDeliverer{ - ChannelID: "channel-id", - BlockHandler: s.fakeBlockHandler, - Ledger: s.fakeLedgerInfo, - BlockVerifier: s.fakeBlockVerifier, - Dialer: s.fakeDialer, - Orderers: s.fakeOrdererConnectionSource, - DoneC: make(chan struct{}), - Signer: s.fakeSigner, - DeliverStreamer: s.fakeDeliverStreamer, - Logger: flogging.MustGetLogger("blocksprovider"), - TLSCertHash: []byte("tls-cert-hash"), - MaxRetryDuration: time.Hour, - MaxRetryInterval: 10 * time.Second, - InitialRetryInterval: 100 * time.Millisecond, + ChannelID: "channel-id", + BlockHandler: s.fakeBlockHandler, + Ledger: s.fakeLedgerInfo, + BlockVerifier: s.fakeBlockVerifier, + Dialer: s.fakeDialer, + Orderers: s.fakeOrdererConnectionSource, + DoneC: make(chan struct{}), + Signer: s.fakeSigner, + DeliverStreamer: s.fakeDeliverStreamer, + CensorshipDetectorFactory: s.fakeCensorshipMonFactory, + Logger: flogging.MustGetLogger("BFTDeliverer.test"), + TLSCertHash: []byte("tls-cert-hash"), + MaxRetryInterval: 10 * time.Second, + InitialRetryInterval: 100 * time.Millisecond, + BlockCensorshipTimeout: 20 * time.Second, + MaxRetryDuration: 600 * time.Second, + MaxRetryDurationExceededHandler: s.fakeDurationExceededHandler.DurationExceededHandler, } s.d.Initialize() @@ -146,53 +222,932 @@ func (s *bftDelivererTestSetup) beforeEach() { blocksprovider.SetSleeper(s.d, s.fakeSleeper) } -func (s *bftDelivererTestSetup) justBeforeEach() { +func (s *bftDelivererTestSetup) start() { go func() { s.d.DeliverBlocks() close(s.endC) }() } -func (s *bftDelivererTestSetup) afterEach() { +func (s *bftDelivererTestSetup) stop() { s.d.Stop() - close(s.doneC) + + select { + case <-s.deliverClientDoneC: + default: + close(s.deliverClientDoneC) + } + <-s.endC } -func TestBFTDeliverer(t *testing.T) { - t.Run("waits patiently for new blocks from the orderer", func(t *testing.T) { +func (s *bftDelivererTestSetup) assertEventuallyMonitorCallCount(n int) { + s.gWithT.Eventually( + func() int { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.fakeCensorshipMon.MonitorCallCount() + }).Should(Equal(n)) +} + +func TestBFTDeliverer_NoBlocks(t *testing.T) { + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + startTime := time.Now() + setup.start() + + t.Log("Checks the ledger height") + require.Eventually(t, func() bool { + return setup.fakeLedgerInfo.LedgerHeightCallCount() == 1 + }, time.Second, 10*time.Millisecond) + + t.Log("Get the endpoints") + setup.gWithT.Eventually(setup.fakeOrdererConnectionSource.ShuffledEndpointsCallCount).Should(Equal(1)) + + t.Log("Signs the seek 
request") + setup.gWithT.Eventually(setup.fakeSigner.SignCallCount).Should(Equal(1)) + + t.Log("Seeks the correct block") + setup.gWithT.Eventually(setup.fakeDeliverClient.SendCallCount).Should(Equal(1)) + env := setup.fakeDeliverClient.SendArgsForCall(0) + require.True(t, bytes.Equal(env.GetSignature(), []byte("good-sig"))) + payload, err := protoutil.UnmarshalPayload(env.GetPayload()) + require.NoError(t, err) + seekInfo := &orderer.SeekInfo{} + err = proto.Unmarshal(payload.Data, seekInfo) + require.NoError(t, err) + require.Equal(t, uint64(7), seekInfo.GetStart().GetSpecified().GetNumber()) + + t.Log("Creates and starts the monitor") + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(1)) + setup.assertEventuallyMonitorCallCount(1) + + t.Log("Dials to an orderer from the shuffled endpoints") + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(1)) + addr, tlsCerts := setup.fakeDialer.DialArgsForCall(0) + require.Equal(t, "orderer-address-1", addr) + require.Nil(t, tlsCerts) // TODO add tests that verify this + + t.Log("waits patiently for new blocks from the orderer") + require.Condition(t, func() (success bool) { + select { + case <-setup.endC: + return false + case <-setup.monEndC: + return false + case <-time.After(100 * time.Millisecond): + return true + } + }, "channels wrongly closed") + + t.Log("block progress is reported correctly") + bNum, bTime := setup.d.BlockProgress() + require.Equal(t, uint64(6), bNum) + require.True(t, bTime.After(startTime)) + + t.Log("client connection is active") + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + require.NotEqual(t, connectivity.Shutdown, setup.clientConnSet[0].GetState(), + "client connection unexpectedly shut down") + }() + + setup.stop() +} + +func TestBFTDeliverer_FatalErrors(t *testing.T) { + t.Run("Ledger height returns an error", func(t *testing.T) { setup := newBFTDelivererTestSetup(t) - setup.beforeEach() - setup.justBeforeEach() + setup.initialize(t) + setup.fakeLedgerInfo.LedgerHeightReturns(0, fmt.Errorf("fake-ledger-error")) + setup.start() + + t.Log("Exits the DeliverBlocks loop") + setup.gWithT.Eventually(setup.endC).Should(BeClosed()) + require.Equal(t, 0, setup.fakeCensorshipMonFactory.CreateCallCount(), "monitor was not created") + + setup.stop() + }) + + t.Run("Fails to sign seek request", func(t *testing.T) { + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + setup.fakeSigner.SignReturns(nil, fmt.Errorf("fake-ledger-error")) + setup.start() + + t.Log("Starts the DeliverBlocks and Monitor loop") + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(1)) + setup.assertEventuallyMonitorCallCount(1) + + t.Log("Exits the DeliverBlocks and Monitor loop") + setup.gWithT.Eventually(setup.endC).Should(BeClosed()) + setup.gWithT.Eventually(setup.monEndC).Should(BeClosed()) + + setup.stop() + }) + + t.Run("No endpoints", func(t *testing.T) { + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + setup.fakeOrdererConnectionSource.ShuffledEndpointsReturns(nil) + setup.start() + + t.Log("Starts the DeliverBlocks and Monitor loop") + setup.gWithT.Eventually(setup.fakeOrdererConnectionSource.ShuffledEndpointsCallCount).Should(Equal(1)) + t.Log("Exits the DeliverBlocks loop") + setup.gWithT.Eventually(setup.endC).Should(BeClosed()) + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(0)) + require.Nil(t, setup.fakeCensorshipMon) + + setup.stop() + }) +} + +func 
TestBFTDeliverer_DialRetries(t *testing.T) { + t.Run("Dial returns error, then succeeds", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + setup.fakeDialer.DialReturnsOnCall(0, nil, fmt.Errorf("fake-dial-error")) + cc, err := grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) + setup.gWithT.Expect(err).NotTo(HaveOccurred()) + setup.fakeDialer.DialReturnsOnCall(1, cc, nil) + + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(2)) + + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(1)) + setup.gWithT.Expect(setup.fakeSleeper.SleepArgsForCall(0)).To(Equal(100 * time.Millisecond)) + + setup.stop() + + setup.mutex.Lock() + defer setup.mutex.Unlock() + require.Len(t, setup.monitorSet, 2) + for i, mon := range setup.monitorSet { + require.Equal(t, 1, mon.MonitorCallCount()) + require.Equal(t, 1, mon.StopCallCount()) + <-setup.monEndCSet[i] + } + }) + + t.Run("Dial returns several consecutive errors, exponential backoff, then succeeds", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + // 6 rounds + for i := 0; i < 24; i++ { + setup.fakeDialer.DialReturnsOnCall(i, nil, fmt.Errorf("fake-dial-error")) + } - setup.withT.Consistently(setup.endC).ShouldNot(BeClosed()) + cc, err := grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) + setup.gWithT.Expect(err).NotTo(HaveOccurred()) + setup.fakeDialer.DialReturnsOnCall(24, cc, nil) + + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(25)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(25)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(24)) + + t.Log("Exponential backoff after every round") + minDur := 100 * time.Millisecond + for i := 0; i < 24; i++ { + round := (i + 1) / 4 + fDur := math.Min(float64(minDur.Nanoseconds())*math.Pow(2.0, float64(round)), float64(10*time.Second)) + dur := time.Duration(fDur) + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + } + + setup.stop() + + setup.mutex.Lock() + defer setup.mutex.Unlock() + require.Len(t, setup.monitorSet, 25) + for i, mon := range setup.monitorSet { + require.Equal(t, 1, mon.MonitorCallCount()) + require.Equal(t, 1, mon.StopCallCount()) + <-setup.monEndCSet[i] + } + + t.Log("Cycles through all sources") + addresses := make(map[string]bool) + addr1, _ := setup.fakeDialer.DialArgsForCall(0) + for i := 1; i < setup.fakeDialer.DialCallCount(); i++ { + addr2, _ := setup.fakeDialer.DialArgsForCall(i) + require.NotEqual(t, addr1, addr2) + addresses[addr1] = true + addr1 = addr2 + } + require.Len(t, addresses, 4) + }) + + t.Run("Dial returns repeated consecutive errors, exponential backoff saturates", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + setup.fakeDialer.DialReturns(nil, fmt.Errorf("fake-dial-error")) + + setup.start() + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(BeNumerically(">=", 100)) + t.Log("Calls the handler but does not stop") + setup.gWithT.Eventually(setup.fakeDurationExceededHandler.DurationExceededHandlerCallCount).Should(BeNumerically(">", 5)) + setup.gWithT.Consistently(setup.endC).ShouldNot(BeClosed()) + setup.stop() + + 
t.Log("Exponential backoff after every round, with saturation of 10s") + minDur := 100 * time.Millisecond + for i := 0; i < setup.fakeSleeper.SleepCallCount(); i++ { + round := (i + 1) / 4 + fDur := math.Min(float64(minDur.Nanoseconds())*math.Pow(2.0, float64(round)), float64(10*time.Second)) + dur := time.Duration(fDur) + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + } + + var monSet []*fake.CensorshipDetector func() { setup.mutex.Lock() defer setup.mutex.Unlock() - setup.withT.Expect(setup.ccs[0].GetState()).NotTo(Equal(connectivity.Shutdown)) + monSet = setup.monitorSet }() - setup.afterEach() + for i, mon := range monSet { + <-setup.monEndCSet[i] + require.Equal(t, 1, mon.MonitorCallCount(), fmt.Sprintf("i=%d", i)) + require.Equal(t, 1, mon.StopCallCount(), fmt.Sprintf("i=%d", i)) + } }) - t.Run("checks the ledger height", func(t *testing.T) { + t.Run("Dial returns repeated consecutive errors, total sleep larger than MaxRetryDuration", func(t *testing.T) { + flogging.ActivateSpec("debug") setup := newBFTDelivererTestSetup(t) - setup.beforeEach() - setup.justBeforeEach() + setup.initialize(t) - setup.withT.Eventually(setup.fakeLedgerInfo.LedgerHeightCallCount).Should(Equal(1)) + setup.fakeDurationExceededHandler.DurationExceededHandlerReturns(true) - setup.afterEach() + setup.fakeDialer.DialReturns(nil, fmt.Errorf("fake-dial-error")) + + setup.start() + t.Log("Calls handler and stops") + setup.gWithT.Eventually(setup.fakeDurationExceededHandler.DurationExceededHandlerCallCount).Should(Equal(1)) + setup.gWithT.Eventually(setup.endC).Should(BeClosed()) + + t.Log("Exponential backoff after every round, with saturation of 10s") + minDur := 100 * time.Millisecond + totalDur := time.Duration(0) + for i := 0; i < setup.fakeSleeper.SleepCallCount(); i++ { + round := (i + 1) / 4 + fDur := math.Min(float64(minDur.Nanoseconds())*math.Pow(2.0, float64(round)), float64(10*time.Second)) + dur := time.Duration(fDur) + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + totalDur += dur + } + + require.True(t, totalDur > setup.d.MaxRetryDuration) + require.Equal(t, 82, setup.fakeSleeper.SleepCallCount()) + + var monSet []*fake.CensorshipDetector + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + monSet = setup.monitorSet + }() + + for i, mon := range monSet { + <-setup.monEndCSet[i] + require.Equal(t, 1, mon.MonitorCallCount(), fmt.Sprintf("i=%d", i)) + if i == 82 { + require.Equal(t, 2, mon.StopCallCount(), fmt.Sprintf("i=%d", i)) + } else { + require.Equal(t, 1, mon.StopCallCount(), fmt.Sprintf("i=%d", i)) + } + } }) +} - t.Run("when the ledger returns an error", func(t *testing.T) { +func TestBFTDeliverer_DeliverRetries(t *testing.T) { + t.Run("Deliver returns error, then succeeds", func(t *testing.T) { + flogging.ActivateSpec("debug") setup := newBFTDelivererTestSetup(t) - setup.beforeEach() - setup.fakeLedgerInfo.LedgerHeightReturns(0, fmt.Errorf("fake-ledger-error")) - setup.justBeforeEach() + setup.initialize(t) + + setup.fakeDeliverStreamer.DeliverReturnsOnCall(0, nil, fmt.Errorf("deliver-error")) + setup.fakeDeliverStreamer.DeliverReturnsOnCall(1, setup.fakeDeliverClient, nil) + + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(2)) + + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(1)) + setup.gWithT.Expect(setup.fakeSleeper.SleepArgsForCall(0)).To(Equal(100 * 
time.Millisecond)) + + setup.stop() + + setup.mutex.Lock() + defer setup.mutex.Unlock() + require.Len(t, setup.monitorSet, 2) + for i, mon := range setup.monitorSet { + require.Equal(t, 1, mon.MonitorCallCount()) + require.Equal(t, 1, mon.StopCallCount()) + <-setup.monEndCSet[i] + } + }) + + t.Run("Deliver returns several consecutive errors, exponential backoff, then succeeds", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + // 6 rounds + for i := 0; i < 24; i++ { + setup.fakeDeliverStreamer.DeliverReturnsOnCall(i, nil, fmt.Errorf("deliver-error")) + } + setup.fakeDeliverStreamer.DeliverReturnsOnCall(24, setup.fakeDeliverClient, nil) + + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(25)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(25)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(24)) + + t.Log("Exponential backoff after every round") + minDur := 100 * time.Millisecond + for i := 0; i < 24; i++ { + round := (i + 1) / 4 + fDur := math.Min(float64(minDur.Nanoseconds())*math.Pow(2.0, float64(round)), float64(10*time.Second)) + dur := time.Duration(fDur) + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + } + + setup.stop() + + setup.mutex.Lock() + defer setup.mutex.Unlock() + require.Len(t, setup.monitorSet, 25) + for i, mon := range setup.monitorSet { + require.Equal(t, 1, mon.MonitorCallCount()) + require.Equal(t, 1, mon.StopCallCount()) + <-setup.monEndCSet[i] + } + + t.Log("Cycles through all sources") + addresses := make(map[string]bool) + addr1, _ := setup.fakeDialer.DialArgsForCall(0) + for i := 1; i < setup.fakeDialer.DialCallCount(); i++ { + addr2, _ := setup.fakeDialer.DialArgsForCall(i) + require.NotEqual(t, addr1, addr2) + addresses[addr1] = true + addr1 = addr2 + } + require.Len(t, addresses, 4) + }) + + t.Run("Deliver returns repeated consecutive errors, exponential backoff saturates", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + setup.fakeDeliverStreamer.DeliverReturns(nil, fmt.Errorf("deliver-error")) + + setup.start() + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(BeNumerically(">=", 40)) + setup.stop() + + t.Log("Exponential backoff after every round, with saturation of 10s") + minDur := 100 * time.Millisecond + for i := 0; i < setup.fakeSleeper.SleepCallCount(); i++ { + round := (i + 1) / 4 + fDur := math.Min(float64(minDur.Nanoseconds())*math.Pow(2.0, float64(round)), float64(10*time.Second)) + dur := time.Duration(fDur) + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + } + + var monSet []*fake.CensorshipDetector + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + monSet = setup.monitorSet + }() + + for i, mon := range monSet { + <-setup.monEndCSet[i] + require.Equal(t, 1, mon.MonitorCallCount(), fmt.Sprintf("i=%d", i)) + require.Equal(t, 1, mon.StopCallCount(), fmt.Sprintf("i=%d", i)) + } + }) +} + +func TestBFTDeliverer_BlockReception(t *testing.T) { + t.Run("Block is valid", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + startTime := time.Now() + + t.Log("block progress is reported correctly before start") + bNum, bTime := setup.d.BlockProgress() + require.Equal(t, uint64(0), bNum) + require.True(t, bTime.IsZero()) + + setup.start() + + 
setup.gWithT.Eventually(setup.fakeLedgerInfo.LedgerHeightCallCount).Should(Equal(1)) + bNum, bTime = setup.d.BlockProgress() + require.Equal(t, uint64(6), bNum) + require.True(t, bTime.After(startTime)) - setup.withT.Eventually(setup.endC).Should(BeClosed()) + t.Log("Recv() returns a single block, num: 7") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: &common.BlockHeader{Number: 7}}, + }, + } + + t.Log("receives the block and loops, not sleeping") + setup.gWithT.Eventually(setup.fakeDeliverClient.RecvCallCount).Should(Equal(2)) + require.Equal(t, 0, setup.fakeSleeper.SleepCallCount()) + + t.Log("checks the validity of the block") + setup.gWithT.Eventually(setup.fakeBlockVerifier.VerifyBlockCallCount).Should(Equal(1)) + channelID, blockNum, block := setup.fakeBlockVerifier.VerifyBlockArgsForCall(0) + require.Equal(t, "channel-id", channelID.String()) + require.Equal(t, uint64(7), blockNum) + require.True(t, proto.Equal(block, &common.Block{Header: &common.BlockHeader{Number: 7}})) + + t.Log("handle the block") + setup.gWithT.Eventually(setup.fakeBlockHandler.HandleBlockCallCount).Should(Equal(1)) + channelName, block2 := setup.fakeBlockHandler.HandleBlockArgsForCall(0) + require.Equal(t, "channel-id", channelName) + require.True(t, proto.Equal(block2, &common.Block{Header: &common.BlockHeader{Number: 7}})) + + t.Log("block progress is reported correctly") + bNum2, bTime2 := setup.d.BlockProgress() + require.Equal(t, uint64(7), bNum2) + require.True(t, bTime2.After(bTime)) + + setup.stop() + }) + + t.Run("Block is invalid", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + t.Log("block verification fails") + setup.fakeBlockVerifier.VerifyBlockReturns(fmt.Errorf("fake-verify-error")) + + startTime := time.Now() + setup.start() + + t.Log("Recv() returns a single block, num: 7") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: &common.BlockHeader{Number: 7}}, + }, + } + + t.Log("disconnects, sleeps, and tries again") + setup.gWithT.Eventually(setup.fakeBlockVerifier.VerifyBlockCallCount).Should(Equal(1)) + setup.gWithT.Eventually(setup.fakeSleeper.SleepCallCount).Should(Equal(1)) + require.Equal(t, 1, setup.fakeDeliverClient.CloseSendCallCount()) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + addr1, _ := setup.fakeDialer.DialArgsForCall(0) + addr2, _ := setup.fakeDialer.DialArgsForCall(1) + require.NotEqual(t, addr1, addr2) + + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + require.Len(t, setup.clientConnSet, 2) + require.Len(t, setup.monitorSet, 2) + }() + + t.Log("does not handle the block") + require.Equal(t, 0, setup.fakeBlockHandler.HandleBlockCallCount()) + + t.Log("block progress is reported correctly") + bNum, bTime := setup.d.BlockProgress() + require.Equal(t, uint64(6), bNum) + require.True(t, bTime.After(startTime)) - setup.afterEach() + setup.stop() }) + + t.Run("Block handling fails", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + t.Log("block verification fails") + setup.fakeBlockHandler.HandleBlockReturns(fmt.Errorf("block-handling-error")) + + startTime := time.Now() + setup.start() + + t.Log("Recv() returns a single block, num: 7") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: 
&common.BlockHeader{Number: 7}}, + }, + } + + t.Log("disconnects, sleeps, and tries again") + setup.gWithT.Eventually(setup.fakeBlockVerifier.VerifyBlockCallCount).Should(Equal(1)) + setup.gWithT.Eventually(setup.fakeSleeper.SleepCallCount).Should(Equal(1)) + require.Equal(t, 1, setup.fakeDeliverClient.CloseSendCallCount()) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + + addr1, _ := setup.fakeDialer.DialArgsForCall(0) + addr2, _ := setup.fakeDialer.DialArgsForCall(1) + require.NotEqual(t, addr1, addr2) + + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + require.Len(t, setup.clientConnSet, 2) + require.Len(t, setup.monitorSet, 2) + }() + + t.Log("handle the block") + require.Equal(t, 1, setup.fakeBlockHandler.HandleBlockCallCount()) + + t.Log("block progress is reported correctly") + bNum, bTime := setup.d.BlockProgress() + require.Equal(t, uint64(6), bNum) + require.True(t, bTime.After(startTime) || bTime.Equal(startTime)) + + setup.stop() + }) + + t.Run("Block reception resets failure counter", func(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + // 6 failed rounds, creates exponential backoff + for i := 0; i < 24; i++ { + setup.fakeDialer.DialReturnsOnCall(i, nil, fmt.Errorf("fake-dial-error")) + } + // success + cc, err := grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) + setup.gWithT.Expect(err).NotTo(HaveOccurred()) + require.NotNil(t, cc) + setup.fakeDialer.DialReturns(cc, nil) + + startTime := time.Now() + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(25)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(25)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(24)) + + t.Log("Recv() returns a single block, num: 7") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: &common.BlockHeader{Number: 7}}, + }, + } + + t.Log("receives the block and loops, not sleeping") + setup.gWithT.Eventually(setup.fakeDeliverClient.RecvCallCount).Should(Equal(2)) + require.Equal(t, 24, setup.fakeSleeper.SleepCallCount()) + + t.Log("checks the validity of the block") + setup.gWithT.Eventually(setup.fakeBlockVerifier.VerifyBlockCallCount).Should(Equal(1)) + channelID, blockNum, block := setup.fakeBlockVerifier.VerifyBlockArgsForCall(0) + require.Equal(t, "channel-id", channelID.String()) + require.Equal(t, uint64(7), blockNum) + require.True(t, proto.Equal(block, &common.Block{Header: &common.BlockHeader{Number: 7}})) + + t.Log("handle the block") + setup.gWithT.Eventually(setup.fakeBlockHandler.HandleBlockCallCount).Should(Equal(1)) + channelName, block2 := setup.fakeBlockHandler.HandleBlockArgsForCall(0) + require.Equal(t, "channel-id", channelName) + require.True(t, proto.Equal(block2, &common.Block{Header: &common.BlockHeader{Number: 7}})) + + t.Log("block progress is reported correctly") + bNum, bTime := setup.d.BlockProgress() + require.Equal(t, uint64(7), bNum) + require.True(t, bTime.After(startTime)) + + setup.gWithT.Expect(setup.fakeDialer.DialCallCount()).Should(Equal(25)) + + t.Log("a Recv() error occurs") + setup.fakeDeliverClient.CloseSendStub = nil + setup.recvStepC <- nil + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(26)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(26)) + 
setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(25)) + + t.Log("failure count was reset, sleep duration returned to minimum") + require.Equal(t, 6400*time.Millisecond, setup.fakeSleeper.SleepArgsForCall(23)) + require.Equal(t, 100*time.Millisecond, setup.fakeSleeper.SleepArgsForCall(24)) + + setup.stop() + }) + + t.Run("Block reception resets total sleep time", func(t *testing.T) { // TODO + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + setup.fakeDurationExceededHandler.DurationExceededHandlerReturns(true) + + // 20 failed rounds, not enough to exceed MaxRetryDuration (it takes 81 calls to go over 10m) + for i := 0; i < 80; i++ { + setup.fakeDialer.DialReturnsOnCall(i, nil, fmt.Errorf("fake-dial-error")) + } + + // another 20 failed rounds, together, it is enough to exceed MaxRetryDuration + for i := 81; i < 160; i++ { + setup.fakeDialer.DialReturnsOnCall(i, nil, fmt.Errorf("fake-dial-error")) + } + + // another 20 failed rounds, together, it is enough to exceed MaxRetryDuration + for i := 161; i < 240; i++ { + setup.fakeDialer.DialReturnsOnCall(i, nil, fmt.Errorf("fake-dial-error")) + } + + // success at attempt 80, 160 and >=240, should reset total sleep time + cc, err := grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) + setup.gWithT.Expect(err).NotTo(HaveOccurred()) + require.NotNil(t, cc) + setup.fakeDialer.DialReturns(cc, nil) + + setup.start() + + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(81)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(80)) + + t.Log("Recv() returns a single block, num: 7") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: &common.BlockHeader{Number: 7}}, + }, + } + + t.Log("receives the block and loops, not sleeping") + setup.gWithT.Eventually(setup.fakeDeliverClient.RecvCallCount).Should(Equal(2)) + require.Equal(t, 80, setup.fakeSleeper.SleepCallCount()) + + t.Log("a Recv() error occurs, more dial attempts") + setup.fakeDeliverClient.CloseSendStub = nil + setup.recvStepC <- nil + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(161)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(160)) + + t.Log("Recv() returns a single block, num: 8") + setup.recvStepC <- &orderer.DeliverResponse{ + Type: &orderer.DeliverResponse_Block{ + Block: &common.Block{Header: &common.BlockHeader{Number: 8}}, + }, + } + + t.Log("receives the block and loops, not sleeping") + setup.gWithT.Eventually(setup.fakeDeliverClient.RecvCallCount).Should(Equal(4)) + require.Equal(t, 160, setup.fakeSleeper.SleepCallCount()) + + t.Log("a Recv() error occurs, more dial attempts") + setup.fakeDeliverClient.CloseSendStub = nil + setup.recvStepC <- nil + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(241)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(240)) + + t.Log("DurationExceededHandler handler is never called, DeliverBlocks() does not stop") + setup.gWithT.Expect(setup.fakeDurationExceededHandler.DurationExceededHandlerCallCount()).To(Equal(0)) + setup.gWithT.Consistently(setup.endC).ShouldNot(BeClosed()) + + setup.stop() + }) +} + +func TestBFTDeliverer_CensorshipMonitorEvents(t *testing.T) { + for _, errVal := range []error{nil, errors.New("some error"), &blocksprovider.ErrFatal{Message: "some fatal error"}, &blocksprovider.ErrStopping{Message: "stopping"}} { + t.Run("unexpected error or value: 
"+fmt.Sprintf("%v", errVal), func(t *testing.T) { + flogging.ActivateSpec("debug") + + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(1)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(1)) + + // var mon *fake.CensorshipDetector + t.Logf("monitor error channel returns unexpected value: %v", errVal) + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + setup.monErrC <- errVal + }() + + t.Logf("monitor and deliverer exit the loop") + <-setup.endC + <-setup.monEndC + + setup.stop() + }) + } + + t.Run("censorship", func(t *testing.T) { + flogging.ActivateSpec("debug") + + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + setup.start() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(1)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(1)) + + t.Log("monitor error channel returns censorship error") + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + setup.monErrC <- &blocksprovider.ErrCensorship{Message: "censorship"} + }() + + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(2)) + + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(1)) + setup.gWithT.Expect(setup.fakeSleeper.SleepArgsForCall(0)).To(Equal(100 * time.Millisecond)) + + setup.stop() + }) + + t.Run("repeated censorship events, with exponential backoff", func(t *testing.T) { + flogging.ActivateSpec("debug") + + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + setup.start() + + for n := 1; n <= 40; n++ { + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(n)) + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(n)) + + t.Logf("monitor error channel returns censorship error num: %d", n) + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + setup.monErrC <- &blocksprovider.ErrCensorship{Message: fmt.Sprintf("censorship %d", n)} + }() + + setup.gWithT.Eventually( + func() int { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + return len(setup.monitorSet) + }).Should(Equal(n + 1)) + + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(n + 1)) + setup.gWithT.Expect(setup.fakeSleeper.SleepCallCount()).To(Equal(n)) + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(n + 1)) + + } + + t.Log("Exponential backoff after every round, with saturation") + minDur := 100 * time.Millisecond + for i := 0; i < 40; i++ { + round := (i + 1) / 4 + dur := time.Duration(minDur.Nanoseconds() * int64(math.Pow(2.0, float64(round)))) + if dur > 10*time.Second { + dur = 10 * time.Second + } + assert.Equal(t, dur, setup.fakeSleeper.SleepArgsForCall(i), fmt.Sprintf("i=%d", i)) + } + + setup.stop() + + setup.mutex.Lock() + defer setup.mutex.Unlock() + require.Len(t, setup.monitorSet, 41) + for i, mon := range setup.monitorSet { + require.Equal(t, 1, mon.MonitorCallCount()) + require.Equal(t, 1, mon.StopCallCount()) + <-setup.monEndCSet[i] + } + + t.Log("Cycles through all sources") + addresses := make(map[string]bool) + addr1, _ := setup.fakeDialer.DialArgsForCall(0) + for i := 1; i < setup.fakeDialer.DialCallCount(); i++ { + addr2, _ := setup.fakeDialer.DialArgsForCall(i) + require.NotEqual(t, addr1, addr2) + addresses[addr1] = true + addr1 = addr2 + } + require.Len(t, 
addresses, 4) + }) +} + +func TestBFTDeliverer_RefreshEndpoints(t *testing.T) { + flogging.ActivateSpec("debug") + setup := newBFTDelivererTestSetup(t) + setup.initialize(t) + + sources1 := []*orderers.Endpoint{ + { + Address: "orderer-address-1", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-2", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-3", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-4", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + } + sources2 := []*orderers.Endpoint{ + { + Address: "orderer-address-5", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-6", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-7", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + { + Address: "orderer-address-8", + RootCerts: nil, + Refreshed: make(chan struct{}), + }, + } + setup.fakeOrdererConnectionSource.ShuffledEndpointsReturnsOnCall(0, sources1) + setup.fakeOrdererConnectionSource.ShuffledEndpointsReturnsOnCall(1, sources2) + + setup.start() + + t.Log("Get the endpoints") + setup.gWithT.Eventually(setup.fakeOrdererConnectionSource.ShuffledEndpointsCallCount).Should(Equal(1)) + + t.Log("Creates and starts the monitor") + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(1)) + setup.assertEventuallyMonitorCallCount(1) + + t.Log("Dials to an orderer from the shuffled endpoints of the first set") + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(1)) + addr, _ := setup.fakeDialer.DialArgsForCall(0) + require.Equal(t, "orderer-address-1", addr) + + t.Log("Closing the refresh channel (always on all endpoints)") + for _, s := range sources1 { + close(s.Refreshed) + } + + t.Log("Get the endpoints again") + setup.gWithT.Eventually(setup.fakeOrdererConnectionSource.ShuffledEndpointsCallCount).Should(Equal(2)) + + t.Log("Creates and starts the monitor") + setup.gWithT.Eventually(setup.fakeCensorshipMonFactory.CreateCallCount).Should(Equal(2)) + func() { + setup.mutex.Lock() + defer setup.mutex.Unlock() + + setup.gWithT.Eventually(func() int { return len(setup.monitorSet) }).Should(Equal(2)) + setup.gWithT.Eventually(setup.monitorSet[1].MonitorCallCount).Should(Equal(1)) + }() + + t.Log("Dials to an orderer from the shuffled endpoints of the second set") + setup.gWithT.Eventually(setup.fakeDialer.DialCallCount).Should(Equal(2)) + addr, _ = setup.fakeDialer.DialArgsForCall(1) + require.Equal(t, "orderer-address-5", addr) + + t.Log("Does not sleep") + require.Equal(t, 0, setup.fakeSleeper.SleepCallCount()) + + setup.stop() } diff --git a/internal/pkg/peer/blocksprovider/bft_header_receiver.go b/internal/pkg/peer/blocksprovider/bft_header_receiver.go new file mode 100644 index 00000000000..3be633e0e4c --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_header_receiver.go @@ -0,0 +1,209 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider + +import ( + "sync" + "time" + + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" +) + +// BFTHeaderReceiver receives a stream of blocks from an orderer, where each block contains a header and metadata. 
+// It keeps track of the last header it received, and the time it was received. +// The header receivers verify each block as it arrives. +// +// TODO The header receiver will receive (or ask for) full config blocks - in a later commit. +// TODO The header receiver will maintain its own private block verifier (bundle) - in a later commit. +type BFTHeaderReceiver struct { + mutex sync.Mutex + chainID string + stop bool + stopChan chan struct{} + started bool + errorStopTime time.Time + endpoint string + client orderer.AtomicBroadcast_DeliverClient + blockVerifier BlockVerifier + + // A block with Header & Metadata, without Data (i.e. lastHeader.Data==nil); TODO except for config blocks, which are full. + lastHeader *common.Block + // The time lastHeader was received, or time.Time{} + lastHeaderTime time.Time + + logger *flogging.FabricLogger +} + +// NewBFTHeaderReceiver creates a new BFTHeaderReceiver. +// +// If the previousReceiver is not nil, the lastHeader and lastHeaderTime are copied to the new instance. +// This allows a new receiver to start from the last known good header that has been received. +func NewBFTHeaderReceiver( + chainID string, + endpoint string, + client orderer.AtomicBroadcast_DeliverClient, + msgVerifier BlockVerifier, + previousReceiver *BFTHeaderReceiver, + logger *flogging.FabricLogger, +) *BFTHeaderReceiver { + hRcv := &BFTHeaderReceiver{ + chainID: chainID, + stopChan: make(chan struct{}, 1), + endpoint: endpoint, + client: client, + blockVerifier: msgVerifier, + logger: logger, + } + + if previousReceiver != nil { + block, bTime, err := previousReceiver.LastBlock() + if err == nil { + hRcv.lastHeader = block + hRcv.lastHeaderTime = bTime + } + } + + return hRcv +} + +// DeliverHeaders starts to deliver headers from the stream client +func (hr *BFTHeaderReceiver) DeliverHeaders() { + var normalExit bool + + defer func() { + if !normalExit { + hr.mutex.Lock() + hr.errorStopTime = time.Now() + hr.mutex.Unlock() + } + _ = hr.Stop() + hr.logger.Debugf("[%s][%s] Stopped delivering headers", hr.chainID, hr.endpoint) + }() + + hr.logger.Debugf("[%s][%s] Starting to deliver headers", hr.chainID, hr.endpoint) + hr.setStarted() + + for !hr.IsStopped() { + msg, err := hr.client.Recv() + if err != nil { + hr.logger.Debugf("[%s][%s] Receive error: %s", hr.chainID, hr.endpoint, err.Error()) + return + } + + switch t := msg.GetType().(type) { + case *orderer.DeliverResponse_Status: + if t.Status == common.Status_SUCCESS { + hr.logger.Warningf("[%s][%s] Warning! 
Received %s for a seek that should never complete", hr.chainID, hr.endpoint, t.Status) + return + } + + hr.logger.Errorf("[%s][%s] Got bad status %s", hr.chainID, hr.endpoint, t.Status) + return + + case *orderer.DeliverResponse_Block: + blockNum := t.Block.Header.Number + + err := hr.blockVerifier.VerifyBlockAttestation(hr.chainID, t.Block) + if err != nil { + hr.logger.Warningf("[%s][%s] Last block verification failed, blockNum [%d], err: %s", hr.chainID, hr.endpoint, blockNum, err) + return + } + + if protoutil.IsConfigBlock(t.Block) { // blocks with block.Data==nil return false + hr.logger.Debugf("[%s][%s] Applying config block to block verifier, blockNum = [%d]", hr.chainID, hr.endpoint, blockNum) + // TODO + } + + hr.logger.Debugf("[%s][%s] Saving block header & metadata, blockNum = [%d]", hr.chainID, hr.endpoint, blockNum) + hr.mutex.Lock() + hr.lastHeader = t.Block + hr.lastHeaderTime = time.Now() + hr.mutex.Unlock() + + default: + hr.logger.Warningf("[%s][%s] Received unknown response type: %v", hr.chainID, hr.endpoint, t) + return + } + } + + normalExit = true +} + +func (hr *BFTHeaderReceiver) IsStopped() bool { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + return hr.stop +} + +func (hr *BFTHeaderReceiver) IsStarted() bool { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + return hr.started +} + +func (hr *BFTHeaderReceiver) setStarted() { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + hr.started = true +} + +func (hr *BFTHeaderReceiver) GetErrorStopTime() time.Time { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + return hr.errorStopTime +} + +// Stop the reception of headers and close the client connection +func (hr *BFTHeaderReceiver) Stop() error { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + if hr.stop { + hr.logger.Infof("[%s][%s] Already stopped", hr.chainID, hr.endpoint) + return nil + } + + hr.logger.Infof("[%s][%s] Stopping", hr.chainID, hr.endpoint) + hr.stop = true + _ = hr.client.CloseSend() + // TODO close the underlying connection as well + close(hr.stopChan) + + return nil +} + +// LastBlockNum returns the last block number which was verified +func (hr *BFTHeaderReceiver) LastBlockNum() (uint64, time.Time, error) { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + if hr.lastHeader == nil { + return 0, time.Time{}, errors.New("not found") + } + + return hr.lastHeader.Header.Number, hr.lastHeaderTime, nil +} + +// LastBlock returns the last block which was verified +func (hr *BFTHeaderReceiver) LastBlock() (*common.Block, time.Time, error) { + hr.mutex.Lock() + defer hr.mutex.Unlock() + + if hr.lastHeader == nil { + return nil, time.Time{}, errors.New("not found") + } + + return hr.lastHeader, hr.lastHeaderTime, nil +} diff --git a/internal/pkg/peer/blocksprovider/bft_header_receiver_test.go b/internal/pkg/peer/blocksprovider/bft_header_receiver_test.go new file mode 100644 index 00000000000..b7f0e00f3fd --- /dev/null +++ b/internal/pkg/peer/blocksprovider/bft_header_receiver_test.go @@ -0,0 +1,310 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider_test + +import ( + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider/fake" + "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBftHeaderReceiver_NoBlocks_RecvError(t *testing.T) { + fakeBlockVerifier := &fake.BlockVerifier{} + fakeBlockVerifier.VerifyBlockAttestationReturns(fmt.Errorf("fake-verify-error")) + + streamClientMock := &fake.DeliverClient{} + streamClientMock.RecvReturns(nil, errors.New("oops")) + streamClientMock.CloseSendReturns(nil) + + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + assert.NotNil(t, hr) + assert.False(t, hr.IsStarted()) + assert.False(t, hr.IsStopped()) + _, _, err := hr.LastBlockNum() + assert.EqualError(t, err, "not found") + + hr.DeliverHeaders() // it will get a Recv() error and exit + + assert.Eventually(t, hr.IsStarted, time.Second, time.Millisecond) + assert.Eventually(t, hr.IsStopped, time.Second, time.Millisecond) + _, _, err = hr.LastBlockNum() + assert.EqualError(t, err, "not found") + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 0) + assert.Equal(t, 1, streamClientMock.RecvCallCount()) +} + +func TestBftHeaderReceiver_BadStatus(t *testing.T) { + fakeBlockVerifier := &fake.BlockVerifier{} + fakeBlockVerifier.VerifyBlockAttestationReturns(fmt.Errorf("fake-verify-error")) + + streamClientMock := &fake.DeliverClient{} + streamClientMock.RecvReturnsOnCall(0, &orderer.DeliverResponse{Type: &orderer.DeliverResponse_Status{Status: common.Status_SUCCESS}}, nil) + streamClientMock.RecvReturnsOnCall(1, &orderer.DeliverResponse{Type: &orderer.DeliverResponse_Status{Status: common.Status_BAD_REQUEST}}, nil) + streamClientMock.RecvReturnsOnCall(2, &orderer.DeliverResponse{Type: &orderer.DeliverResponse_Status{Status: common.Status_SERVICE_UNAVAILABLE}}, nil) + streamClientMock.CloseSendReturns(nil) + + for i := 0; i < 3; i++ { + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + assert.NotNil(t, hr) + + hr.DeliverHeaders() // it will get a bad status and exit + assert.Eventually(t, hr.IsStarted, time.Second, time.Millisecond) + assert.Eventually(t, hr.IsStopped, time.Second, time.Millisecond) + _, _, err := hr.LastBlockNum() + assert.EqualError(t, err, "not found") + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 0) + } +} + +func TestBftHeaderReceiver_NilResponse(t *testing.T) { + fakeBlockVerifier := &fake.BlockVerifier{} + fakeBlockVerifier.VerifyBlockAttestationReturns(fmt.Errorf("fake-verify-error")) + + streamClientMock := &fake.DeliverClient{} + streamClientMock.RecvReturns(nil, nil) + streamClientMock.CloseSendReturns(nil) + + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + assert.NotNil(t, hr) + + hr.DeliverHeaders() // it will get a bad status and exit + assert.Eventually(t, hr.IsStarted, 
time.Second, time.Millisecond) + assert.Eventually(t, hr.IsStopped, time.Second, time.Millisecond) + _, _, err := hr.LastBlockNum() + assert.EqualError(t, err, "not found") + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 0) +} + +func TestBftHeaderReceiver_WithBlocks_Renew(t *testing.T) { + flogging.ActivateSpec("debug") + fakeBlockVerifier := &fake.BlockVerifier{} + streamClientMock := &fake.DeliverClient{} + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + + seqCh := make(chan uint64) + streamClientMock.RecvCalls( + func() (*orderer.DeliverResponse, error) { + time.Sleep(time.Millisecond) + + seqNew, ok := <-seqCh + if ok { + return prepareBlock(seqNew, orderer.SeekInfo_HEADER_WITH_SIG, uint32(1)), nil + } else { + return nil, errors.New("test closed") + } + }, + ) + streamClientMock.CloseSendReturns(nil) + fakeBlockVerifier.VerifyBlockAttestationCalls(naiveBlockVerifier) + + go hr.DeliverHeaders() + + var bNum uint64 + var bTime time.Time + var err error + + seqCh <- uint64(1) + require.Eventually(t, func() bool { + bNum, bTime, err = hr.LastBlockNum() + return err == nil && bNum == uint64(1) && !bTime.IsZero() + }, time.Second, time.Millisecond) + + bTimeOld := bTime + seqCh <- uint64(2) + require.Eventually(t, func() bool { + bNum, bTime, err = hr.LastBlockNum() + return err == nil && bNum == uint64(2) && bTime.After(bTimeOld) + }, time.Second, time.Millisecond) + + err = hr.Stop() + assert.NoError(t, err) + + bTimeOld = bTime + bNum, bTime, err = hr.LastBlockNum() + assert.NoError(t, err) + assert.Equal(t, uint64(2), bNum) + assert.Equal(t, bTime, bTimeOld) + + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 2) + + //=== Create a new BFTHeaderReceiver with the last good header of the previous receiver + fakeBlockVerifier = &fake.BlockVerifier{} + streamClientMock = &fake.DeliverClient{} + hr2 := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, hr, flogging.MustGetLogger("test.BFTHeaderReceiver.2")) + assert.False(t, hr2.IsStarted()) + assert.False(t, hr2.IsStopped()) + bNum, bTime, err = hr2.LastBlockNum() + assert.NoError(t, err) + assert.Equal(t, uint64(2), bNum) + assert.Equal(t, bTime, bTimeOld) +} + +func TestBftHeaderReceiver_WithBlocks_StopOnVerificationFailure(t *testing.T) { + flogging.ActivateSpec("debug") + fakeBlockVerifier := &fake.BlockVerifier{} + streamClientMock := &fake.DeliverClient{} + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + + seqCh := make(chan uint64) + goodSig := uint32(1) + streamClientMock.RecvCalls( + func() (*orderer.DeliverResponse, error) { + time.Sleep(time.Millisecond) + + seqNew, ok := <-seqCh + if ok { + return prepareBlock(seqNew, orderer.SeekInfo_HEADER_WITH_SIG, atomic.LoadUint32(&goodSig)), nil + } else { + return nil, errors.New("test closed") + } + }, + ) + streamClientMock.CloseSendReturns(nil) + fakeBlockVerifier.VerifyBlockAttestationCalls(naiveBlockVerifier) + + go hr.DeliverHeaders() + + var bNum uint64 + var bTime time.Time + var err error + + seqCh <- uint64(1) + require.Eventually(t, func() bool { + bNum, bTime, err = hr.LastBlockNum() + return err == nil && bNum == uint64(1) && !bTime.IsZero() + }, time.Second, time.Millisecond) + + bTimeOld := bTime + seqCh <- uint64(2) + 
require.Eventually(t, func() bool { + bNum, bTime, err = hr.LastBlockNum() + return err == nil && bNum == uint64(2) && bTime.After(bTimeOld) + }, time.Second, time.Millisecond) + + // Invalid block sig causes the receiver to close + atomic.StoreUint32(&goodSig, 0) + seqCh <- uint64(3) + require.Eventually(t, hr.IsStopped, time.Second, time.Millisecond) + + // After the receiver closes, it returns the last good header + bTimeOld = bTime + bNum, bTime, err = hr.LastBlockNum() + assert.NoError(t, err) + assert.Equal(t, uint64(2), bNum) + assert.Equal(t, bTime, bTimeOld) + + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 3) +} + +func TestBftHeaderReceiver_VerifyOnce(t *testing.T) { + flogging.ActivateSpec("debug") + fakeBlockVerifier := &fake.BlockVerifier{} + streamClientMock := &fake.DeliverClient{} + hr := blocksprovider.NewBFTHeaderReceiver("testchannel", "10.10.10.11:666", streamClientMock, fakeBlockVerifier, nil, flogging.MustGetLogger("test.BFTHeaderReceiver")) + + seqCh := make(chan uint64) + goodSig := uint32(1) + streamClientMock.RecvCalls( + func() (*orderer.DeliverResponse, error) { + time.Sleep(time.Millisecond) + + seqNew, ok := <-seqCh + if ok { + return prepareBlock(seqNew, orderer.SeekInfo_HEADER_WITH_SIG, atomic.LoadUint32(&goodSig)), nil + } else { + return nil, errors.New("test closed") + } + }, + ) + streamClientMock.CloseSendReturns(nil) + fakeBlockVerifier.VerifyBlockAttestationCalls(naiveBlockVerifier) + + go hr.DeliverHeaders() + + seqCh <- uint64(5) + require.Eventually(t, func() bool { + bNum, bTime, err := hr.LastBlockNum() + return err == nil && bNum == uint64(5) && !bTime.IsZero() + }, time.Second, time.Millisecond) + + for i := 0; i < 10; i++ { + bNum, bTime, err := hr.LastBlockNum() + assert.NoError(t, err) + assert.Equal(t, uint64(5), bNum) + assert.True(t, !bTime.IsZero()) + } + assert.Equal(t, fakeBlockVerifier.VerifyBlockAttestationCallCount(), 1) + + hr.Stop() + assert.Eventually(t, hr.IsStopped, time.Second, time.Millisecond) +} + +func prepareBlock(seq uint64, contentType orderer.SeekInfo_SeekContentType, goodSignature uint32) *orderer.DeliverResponse { + const numTx = 10 + block := protoutil.NewBlock(seq, []byte{1, 2, 3, 4, 5, 6, 7, 8}) + data := &common.BlockData{ + Data: make([][]byte, numTx), + } + for i := 0; i < numTx; i++ { + data.Data[i] = []byte{byte(i), byte(seq)} + } + block.Header.DataHash = protoutil.BlockDataHash(data) + if contentType == orderer.SeekInfo_BLOCK { + block.Data = data + } + + if goodSignature > 0 { + block.Metadata.Metadata[common.BlockMetadataIndex_SIGNATURES] = []byte("good") + } else { + block.Metadata.Metadata[common.BlockMetadataIndex_SIGNATURES] = []byte("bad") + } + + return &orderer.DeliverResponse{Type: &orderer.DeliverResponse_Block{Block: block}} +} + +func naiveBlockVerifier(_ string, signedBlock *common.Block) error { + sigArray := signedBlock.Metadata.Metadata[common.BlockMetadataIndex_SIGNATURES] + sig := string(sigArray) + if sig == "good" { + return nil + } + return errors.New("test: bad signature") +} + +func waitForAtomicGreaterThan(addr *uint64, threshold uint64, timeoutOpt ...time.Duration) bool { + to := 5 * time.Second + if len(timeoutOpt) > 0 { + to = timeoutOpt[0] + } + + ticker := time.NewTicker(time.Millisecond) + defer ticker.Stop() + timeout := time.After(to) + + for { + select { + case <-ticker.C: + case <-timeout: + return false + } + + if atomic.LoadUint64(addr) > threshold { + return true + } + } +} diff --git a/internal/pkg/peer/blocksprovider/block_receiver.go 
b/internal/pkg/peer/blocksprovider/block_receiver.go index 197d3a57427..0ffb7dba6d4 100644 --- a/internal/pkg/peer/blocksprovider/block_receiver.go +++ b/internal/pkg/peer/blocksprovider/block_receiver.go @@ -112,12 +112,12 @@ RecvLoop: // Loop until the endpoint is refreshed, or there is an error on the c onSuccess(blockNum) case <-br.stopC: br.logger.Infof("BlockReceiver got a signal to stop") - err = &errStopping{message: "got a signal to stop"} + err = &ErrStopping{Message: "got a signal to stop"} break RecvLoop } } - // cancel the sending side and wait for the start goroutine to exit + // cancel the sending side and wait for the `Start` goroutine to exit br.cancelSendFunc() <-br.recvC diff --git a/internal/pkg/peer/blocksprovider/deliverer.go b/internal/pkg/peer/blocksprovider/deliverer.go index e981d5d2960..5392061691d 100644 --- a/internal/pkg/peer/blocksprovider/deliverer.go +++ b/internal/pkg/peer/blocksprovider/deliverer.go @@ -56,7 +56,7 @@ type BlockVerifier interface { //go:generate counterfeiter -o fake/orderer_connection_source.go --fake-name OrdererConnectionSource . OrdererConnectionSource type OrdererConnectionSource interface { RandomEndpoint() (*orderers.Endpoint, error) - Endpoints() []*orderers.Endpoint + ShuffledEndpoints() []*orderers.Endpoint } //go:generate counterfeiter -o fake/dialer.go --fake-name Dialer . Dialer @@ -208,7 +208,7 @@ func (d *Deliverer) DeliverBlocks() { switch err.(type) { case *errRefreshEndpoint: // Don't count it as an error, we'll reconnect immediately. - case *errStopping: + case *ErrStopping: // Don't count it as an error, it is a signal to stop. default: failureCounter++ diff --git a/internal/pkg/peer/blocksprovider/deliverer_test.go b/internal/pkg/peer/blocksprovider/deliverer_test.go index 9ae2799955a..a2389273189 100644 --- a/internal/pkg/peer/blocksprovider/deliverer_test.go +++ b/internal/pkg/peer/blocksprovider/deliverer_test.go @@ -302,9 +302,9 @@ var _ = Describe("CFT-Deliverer", func() { When("an error occurs, then a block is successfully delivered", func() { BeforeEach(func() { fakeDeliverStreamer.DeliverReturnsOnCall(0, nil, fmt.Errorf("deliver-error")) - fakeDeliverStreamer.DeliverReturnsOnCall(1, fakeDeliverClient, nil) fakeDeliverStreamer.DeliverReturnsOnCall(1, nil, fmt.Errorf("deliver-error")) fakeDeliverStreamer.DeliverReturnsOnCall(2, nil, fmt.Errorf("deliver-error")) + fakeDeliverStreamer.DeliverReturnsOnCall(3, fakeDeliverClient, nil) }) It("sleeps in an exponential fashion and retries until dial is successful", func() { diff --git a/internal/pkg/peer/blocksprovider/delivery_requester.go b/internal/pkg/peer/blocksprovider/delivery_requester.go index fc3d92d142d..b13ae0d9248 100644 --- a/internal/pkg/peer/blocksprovider/delivery_requester.go +++ b/internal/pkg/peer/blocksprovider/delivery_requester.go @@ -50,23 +50,7 @@ func (dr *DeliveryRequester) SeekInfoBlocksFrom(ledgerHeight uint64) (*common.En common.HeaderType_DELIVER_SEEK_INFO, dr.channelID, dr.signer, - &orderer.SeekInfo{ - Start: &orderer.SeekPosition{ - Type: &orderer.SeekPosition_Specified{ - Specified: &orderer.SeekSpecified{ - Number: ledgerHeight, - }, - }, - }, - Stop: &orderer.SeekPosition{ - Type: &orderer.SeekPosition_Specified{ - Specified: &orderer.SeekSpecified{ - Number: math.MaxUint64, - }, - }, - }, - Behavior: orderer.SeekInfo_BLOCK_UNTIL_READY, - }, + seekInfoFrom(ledgerHeight, orderer.SeekInfo_BLOCK), int32(0), uint64(0), dr.tlsCertHash, @@ -76,8 +60,36 @@ func (dr *DeliveryRequester) SeekInfoBlocksFrom(ledgerHeight uint64) (*common.En // 
SeekInfoHeadersFrom produces a signed SeekInfo envelope requesting a stream of headers (block attestations) from // a certain block number. func (dr *DeliveryRequester) SeekInfoHeadersFrom(ledgerHeight uint64) (*common.Envelope, error) { - // TODO - return nil, errors.New("not implemented yet") + return protoutil.CreateSignedEnvelopeWithTLSBinding( + common.HeaderType_DELIVER_SEEK_INFO, + dr.channelID, + dr.signer, + seekInfoFrom(ledgerHeight, orderer.SeekInfo_HEADER_WITH_SIG), + int32(0), + uint64(0), + dr.tlsCertHash, + ) +} + +func seekInfoFrom(height uint64, contentType orderer.SeekInfo_SeekContentType) *orderer.SeekInfo { + return &orderer.SeekInfo{ + Start: &orderer.SeekPosition{ + Type: &orderer.SeekPosition_Specified{ + Specified: &orderer.SeekSpecified{ + Number: height, + }, + }, + }, + Stop: &orderer.SeekPosition{ + Type: &orderer.SeekPosition_Specified{ + Specified: &orderer.SeekSpecified{ + Number: math.MaxUint64, + }, + }, + }, + Behavior: orderer.SeekInfo_BLOCK_UNTIL_READY, + ContentType: contentType, + } } // SeekInfoNewestHeader produces a signed SeekInfo envelope requesting the newest header (block attestation) available diff --git a/internal/pkg/peer/blocksprovider/fake/block_progress_reporter.go b/internal/pkg/peer/blocksprovider/fake/block_progress_reporter.go new file mode 100644 index 00000000000..c754fd215ed --- /dev/null +++ b/internal/pkg/peer/blocksprovider/fake/block_progress_reporter.go @@ -0,0 +1,108 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fake + +import ( + "sync" + "time" + + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" +) + +type BlockProgressReporter struct { + BlockProgressStub func() (uint64, time.Time) + blockProgressMutex sync.RWMutex + blockProgressArgsForCall []struct { + } + blockProgressReturns struct { + result1 uint64 + result2 time.Time + } + blockProgressReturnsOnCall map[int]struct { + result1 uint64 + result2 time.Time + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *BlockProgressReporter) BlockProgress() (uint64, time.Time) { + fake.blockProgressMutex.Lock() + ret, specificReturn := fake.blockProgressReturnsOnCall[len(fake.blockProgressArgsForCall)] + fake.blockProgressArgsForCall = append(fake.blockProgressArgsForCall, struct { + }{}) + stub := fake.BlockProgressStub + fakeReturns := fake.blockProgressReturns + fake.recordInvocation("BlockProgress", []interface{}{}) + fake.blockProgressMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *BlockProgressReporter) BlockProgressCallCount() int { + fake.blockProgressMutex.RLock() + defer fake.blockProgressMutex.RUnlock() + return len(fake.blockProgressArgsForCall) +} + +func (fake *BlockProgressReporter) BlockProgressCalls(stub func() (uint64, time.Time)) { + fake.blockProgressMutex.Lock() + defer fake.blockProgressMutex.Unlock() + fake.BlockProgressStub = stub +} + +func (fake *BlockProgressReporter) BlockProgressReturns(result1 uint64, result2 time.Time) { + fake.blockProgressMutex.Lock() + defer fake.blockProgressMutex.Unlock() + fake.BlockProgressStub = nil + fake.blockProgressReturns = struct { + result1 uint64 + result2 time.Time + }{result1, result2} +} + +func (fake *BlockProgressReporter) BlockProgressReturnsOnCall(i int, result1 uint64, result2 time.Time) { + fake.blockProgressMutex.Lock() + defer fake.blockProgressMutex.Unlock() + fake.BlockProgressStub = nil + if 
fake.blockProgressReturnsOnCall == nil { + fake.blockProgressReturnsOnCall = make(map[int]struct { + result1 uint64 + result2 time.Time + }) + } + fake.blockProgressReturnsOnCall[i] = struct { + result1 uint64 + result2 time.Time + }{result1, result2} +} + +func (fake *BlockProgressReporter) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.blockProgressMutex.RLock() + defer fake.blockProgressMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *BlockProgressReporter) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ blocksprovider.BlockProgressReporter = new(BlockProgressReporter) diff --git a/internal/pkg/peer/blocksprovider/fake/block_verifier.go b/internal/pkg/peer/blocksprovider/fake/block_verifier.go index 9e7fefa29c6..7c3b2b385a2 100644 --- a/internal/pkg/peer/blocksprovider/fake/block_verifier.go +++ b/internal/pkg/peer/blocksprovider/fake/block_verifier.go @@ -47,15 +47,16 @@ func (fake *BlockVerifier) VerifyBlock(arg1 common.ChannelID, arg2 uint64, arg3 arg2 uint64 arg3 *commona.Block }{arg1, arg2, arg3}) + stub := fake.VerifyBlockStub + fakeReturns := fake.verifyBlockReturns fake.recordInvocation("VerifyBlock", []interface{}{arg1, arg2, arg3}) fake.verifyBlockMutex.Unlock() - if fake.VerifyBlockStub != nil { - return fake.VerifyBlockStub(arg1, arg2, arg3) + if stub != nil { + return stub(arg1, arg2, arg3) } if specificReturn { return ret.result1 } - fakeReturns := fake.verifyBlockReturns return fakeReturns.result1 } @@ -108,15 +109,16 @@ func (fake *BlockVerifier) VerifyBlockAttestation(arg1 string, arg2 *commona.Blo arg1 string arg2 *commona.Block }{arg1, arg2}) + stub := fake.VerifyBlockAttestationStub + fakeReturns := fake.verifyBlockAttestationReturns fake.recordInvocation("VerifyBlockAttestation", []interface{}{arg1, arg2}) fake.verifyBlockAttestationMutex.Unlock() - if fake.VerifyBlockAttestationStub != nil { - return fake.VerifyBlockAttestationStub(arg1, arg2) + if stub != nil { + return stub(arg1, arg2) } if specificReturn { return ret.result1 } - fakeReturns := fake.verifyBlockAttestationReturns return fakeReturns.result1 } diff --git a/internal/pkg/peer/blocksprovider/fake/censorship_detector.go b/internal/pkg/peer/blocksprovider/fake/censorship_detector.go new file mode 100644 index 00000000000..a241be9d20a --- /dev/null +++ b/internal/pkg/peer/blocksprovider/fake/censorship_detector.go @@ -0,0 +1,162 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package fake + +import ( + "sync" + + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" +) + +type CensorshipDetector struct { + ErrorsChannelStub func() <-chan error + errorsChannelMutex sync.RWMutex + errorsChannelArgsForCall []struct { + } + errorsChannelReturns struct { + result1 <-chan error + } + errorsChannelReturnsOnCall map[int]struct { + result1 <-chan error + } + MonitorStub func() + monitorMutex sync.RWMutex + monitorArgsForCall []struct { + } + StopStub func() + stopMutex sync.RWMutex + stopArgsForCall []struct { + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CensorshipDetector) ErrorsChannel() <-chan error { + fake.errorsChannelMutex.Lock() + ret, specificReturn := fake.errorsChannelReturnsOnCall[len(fake.errorsChannelArgsForCall)] + fake.errorsChannelArgsForCall = append(fake.errorsChannelArgsForCall, struct { + }{}) + stub := fake.ErrorsChannelStub + fakeReturns := fake.errorsChannelReturns + fake.recordInvocation("ErrorsChannel", []interface{}{}) + fake.errorsChannelMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CensorshipDetector) ErrorsChannelCallCount() int { + fake.errorsChannelMutex.RLock() + defer fake.errorsChannelMutex.RUnlock() + return len(fake.errorsChannelArgsForCall) +} + +func (fake *CensorshipDetector) ErrorsChannelCalls(stub func() <-chan error) { + fake.errorsChannelMutex.Lock() + defer fake.errorsChannelMutex.Unlock() + fake.ErrorsChannelStub = stub +} + +func (fake *CensorshipDetector) ErrorsChannelReturns(result1 <-chan error) { + fake.errorsChannelMutex.Lock() + defer fake.errorsChannelMutex.Unlock() + fake.ErrorsChannelStub = nil + fake.errorsChannelReturns = struct { + result1 <-chan error + }{result1} +} + +func (fake *CensorshipDetector) ErrorsChannelReturnsOnCall(i int, result1 <-chan error) { + fake.errorsChannelMutex.Lock() + defer fake.errorsChannelMutex.Unlock() + fake.ErrorsChannelStub = nil + if fake.errorsChannelReturnsOnCall == nil { + fake.errorsChannelReturnsOnCall = make(map[int]struct { + result1 <-chan error + }) + } + fake.errorsChannelReturnsOnCall[i] = struct { + result1 <-chan error + }{result1} +} + +func (fake *CensorshipDetector) Monitor() { + fake.monitorMutex.Lock() + fake.monitorArgsForCall = append(fake.monitorArgsForCall, struct { + }{}) + stub := fake.MonitorStub + fake.recordInvocation("Monitor", []interface{}{}) + fake.monitorMutex.Unlock() + if stub != nil { + fake.MonitorStub() + } +} + +func (fake *CensorshipDetector) MonitorCallCount() int { + fake.monitorMutex.RLock() + defer fake.monitorMutex.RUnlock() + return len(fake.monitorArgsForCall) +} + +func (fake *CensorshipDetector) MonitorCalls(stub func()) { + fake.monitorMutex.Lock() + defer fake.monitorMutex.Unlock() + fake.MonitorStub = stub +} + +func (fake *CensorshipDetector) Stop() { + fake.stopMutex.Lock() + fake.stopArgsForCall = append(fake.stopArgsForCall, struct { + }{}) + stub := fake.StopStub + fake.recordInvocation("Stop", []interface{}{}) + fake.stopMutex.Unlock() + if stub != nil { + fake.StopStub() + } +} + +func (fake *CensorshipDetector) StopCallCount() int { + fake.stopMutex.RLock() + defer fake.stopMutex.RUnlock() + return len(fake.stopArgsForCall) +} + +func (fake *CensorshipDetector) StopCalls(stub func()) { + fake.stopMutex.Lock() + defer fake.stopMutex.Unlock() + fake.StopStub = stub +} + +func (fake *CensorshipDetector) Invocations() map[string][][]interface{} { + 
fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.errorsChannelMutex.RLock() + defer fake.errorsChannelMutex.RUnlock() + fake.monitorMutex.RLock() + defer fake.monitorMutex.RUnlock() + fake.stopMutex.RLock() + defer fake.stopMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CensorshipDetector) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ blocksprovider.CensorshipDetector = new(CensorshipDetector) diff --git a/internal/pkg/peer/blocksprovider/fake/censorship_detector_factory.go b/internal/pkg/peer/blocksprovider/fake/censorship_detector_factory.go new file mode 100644 index 00000000000..40fd224aff0 --- /dev/null +++ b/internal/pkg/peer/blocksprovider/fake/censorship_detector_factory.go @@ -0,0 +1,129 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fake + +import ( + "sync" + + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" + "github.com/hyperledger/fabric/internal/pkg/peer/orderers" +) + +type CensorshipDetectorFactory struct { + CreateStub func(string, blocksprovider.BlockVerifier, blocksprovider.DeliverClientRequester, blocksprovider.BlockProgressReporter, []*orderers.Endpoint, int, blocksprovider.TimeoutConfig) blocksprovider.CensorshipDetector + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 string + arg2 blocksprovider.BlockVerifier + arg3 blocksprovider.DeliverClientRequester + arg4 blocksprovider.BlockProgressReporter + arg5 []*orderers.Endpoint + arg6 int + arg7 blocksprovider.TimeoutConfig + } + createReturns struct { + result1 blocksprovider.CensorshipDetector + } + createReturnsOnCall map[int]struct { + result1 blocksprovider.CensorshipDetector + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *CensorshipDetectorFactory) Create(arg1 string, arg2 blocksprovider.BlockVerifier, arg3 blocksprovider.DeliverClientRequester, arg4 blocksprovider.BlockProgressReporter, arg5 []*orderers.Endpoint, arg6 int, arg7 blocksprovider.TimeoutConfig) blocksprovider.CensorshipDetector { + var arg5Copy []*orderers.Endpoint + if arg5 != nil { + arg5Copy = make([]*orderers.Endpoint, len(arg5)) + copy(arg5Copy, arg5) + } + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 string + arg2 blocksprovider.BlockVerifier + arg3 blocksprovider.DeliverClientRequester + arg4 blocksprovider.BlockProgressReporter + arg5 []*orderers.Endpoint + arg6 int + arg7 blocksprovider.TimeoutConfig + }{arg1, arg2, arg3, arg4, arg5Copy, arg6, arg7}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3, arg4, arg5Copy, arg6, arg7}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *CensorshipDetectorFactory) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return 
len(fake.createArgsForCall) +} + +func (fake *CensorshipDetectorFactory) CreateCalls(stub func(string, blocksprovider.BlockVerifier, blocksprovider.DeliverClientRequester, blocksprovider.BlockProgressReporter, []*orderers.Endpoint, int, blocksprovider.TimeoutConfig) blocksprovider.CensorshipDetector) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *CensorshipDetectorFactory) CreateArgsForCall(i int) (string, blocksprovider.BlockVerifier, blocksprovider.DeliverClientRequester, blocksprovider.BlockProgressReporter, []*orderers.Endpoint, int, blocksprovider.TimeoutConfig) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7 +} + +func (fake *CensorshipDetectorFactory) CreateReturns(result1 blocksprovider.CensorshipDetector) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 blocksprovider.CensorshipDetector + }{result1} +} + +func (fake *CensorshipDetectorFactory) CreateReturnsOnCall(i int, result1 blocksprovider.CensorshipDetector) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 blocksprovider.CensorshipDetector + }) + } + fake.createReturnsOnCall[i] = struct { + result1 blocksprovider.CensorshipDetector + }{result1} +} + +func (fake *CensorshipDetectorFactory) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *CensorshipDetectorFactory) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ blocksprovider.CensorshipDetectorFactory = new(CensorshipDetectorFactory) diff --git a/internal/pkg/peer/blocksprovider/fake/deliver_client_requester.go b/internal/pkg/peer/blocksprovider/fake/deliver_client_requester.go new file mode 100644 index 00000000000..329affdcf30 --- /dev/null +++ b/internal/pkg/peer/blocksprovider/fake/deliver_client_requester.go @@ -0,0 +1,205 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package fake + +import ( + "sync" + + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" + "github.com/hyperledger/fabric/internal/pkg/peer/orderers" +) + +type DeliverClientRequester struct { + ConnectStub func(*common.Envelope, *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) + connectMutex sync.RWMutex + connectArgsForCall []struct { + arg1 *common.Envelope + arg2 *orderers.Endpoint + } + connectReturns struct { + result1 orderer.AtomicBroadcast_DeliverClient + result2 func() + result3 error + } + connectReturnsOnCall map[int]struct { + result1 orderer.AtomicBroadcast_DeliverClient + result2 func() + result3 error + } + SeekInfoHeadersFromStub func(uint64) (*common.Envelope, error) + seekInfoHeadersFromMutex sync.RWMutex + seekInfoHeadersFromArgsForCall []struct { + arg1 uint64 + } + seekInfoHeadersFromReturns struct { + result1 *common.Envelope + result2 error + } + seekInfoHeadersFromReturnsOnCall map[int]struct { + result1 *common.Envelope + result2 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DeliverClientRequester) Connect(arg1 *common.Envelope, arg2 *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error) { + fake.connectMutex.Lock() + ret, specificReturn := fake.connectReturnsOnCall[len(fake.connectArgsForCall)] + fake.connectArgsForCall = append(fake.connectArgsForCall, struct { + arg1 *common.Envelope + arg2 *orderers.Endpoint + }{arg1, arg2}) + stub := fake.ConnectStub + fakeReturns := fake.connectReturns + fake.recordInvocation("Connect", []interface{}{arg1, arg2}) + fake.connectMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2, ret.result3 + } + return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3 +} + +func (fake *DeliverClientRequester) ConnectCallCount() int { + fake.connectMutex.RLock() + defer fake.connectMutex.RUnlock() + return len(fake.connectArgsForCall) +} + +func (fake *DeliverClientRequester) ConnectCalls(stub func(*common.Envelope, *orderers.Endpoint) (orderer.AtomicBroadcast_DeliverClient, func(), error)) { + fake.connectMutex.Lock() + defer fake.connectMutex.Unlock() + fake.ConnectStub = stub +} + +func (fake *DeliverClientRequester) ConnectArgsForCall(i int) (*common.Envelope, *orderers.Endpoint) { + fake.connectMutex.RLock() + defer fake.connectMutex.RUnlock() + argsForCall := fake.connectArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *DeliverClientRequester) ConnectReturns(result1 orderer.AtomicBroadcast_DeliverClient, result2 func(), result3 error) { + fake.connectMutex.Lock() + defer fake.connectMutex.Unlock() + fake.ConnectStub = nil + fake.connectReturns = struct { + result1 orderer.AtomicBroadcast_DeliverClient + result2 func() + result3 error + }{result1, result2, result3} +} + +func (fake *DeliverClientRequester) ConnectReturnsOnCall(i int, result1 orderer.AtomicBroadcast_DeliverClient, result2 func(), result3 error) { + fake.connectMutex.Lock() + defer fake.connectMutex.Unlock() + fake.ConnectStub = nil + if fake.connectReturnsOnCall == nil { + fake.connectReturnsOnCall = make(map[int]struct { + result1 orderer.AtomicBroadcast_DeliverClient + result2 func() + result3 error + }) + } + fake.connectReturnsOnCall[i] = struct { + result1 orderer.AtomicBroadcast_DeliverClient + result2 func() + result3 error + }{result1, 
result2, result3} +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFrom(arg1 uint64) (*common.Envelope, error) { + fake.seekInfoHeadersFromMutex.Lock() + ret, specificReturn := fake.seekInfoHeadersFromReturnsOnCall[len(fake.seekInfoHeadersFromArgsForCall)] + fake.seekInfoHeadersFromArgsForCall = append(fake.seekInfoHeadersFromArgsForCall, struct { + arg1 uint64 + }{arg1}) + stub := fake.SeekInfoHeadersFromStub + fakeReturns := fake.seekInfoHeadersFromReturns + fake.recordInvocation("SeekInfoHeadersFrom", []interface{}{arg1}) + fake.seekInfoHeadersFromMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFromCallCount() int { + fake.seekInfoHeadersFromMutex.RLock() + defer fake.seekInfoHeadersFromMutex.RUnlock() + return len(fake.seekInfoHeadersFromArgsForCall) +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFromCalls(stub func(uint64) (*common.Envelope, error)) { + fake.seekInfoHeadersFromMutex.Lock() + defer fake.seekInfoHeadersFromMutex.Unlock() + fake.SeekInfoHeadersFromStub = stub +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFromArgsForCall(i int) uint64 { + fake.seekInfoHeadersFromMutex.RLock() + defer fake.seekInfoHeadersFromMutex.RUnlock() + argsForCall := fake.seekInfoHeadersFromArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFromReturns(result1 *common.Envelope, result2 error) { + fake.seekInfoHeadersFromMutex.Lock() + defer fake.seekInfoHeadersFromMutex.Unlock() + fake.SeekInfoHeadersFromStub = nil + fake.seekInfoHeadersFromReturns = struct { + result1 *common.Envelope + result2 error + }{result1, result2} +} + +func (fake *DeliverClientRequester) SeekInfoHeadersFromReturnsOnCall(i int, result1 *common.Envelope, result2 error) { + fake.seekInfoHeadersFromMutex.Lock() + defer fake.seekInfoHeadersFromMutex.Unlock() + fake.SeekInfoHeadersFromStub = nil + if fake.seekInfoHeadersFromReturnsOnCall == nil { + fake.seekInfoHeadersFromReturnsOnCall = make(map[int]struct { + result1 *common.Envelope + result2 error + }) + } + fake.seekInfoHeadersFromReturnsOnCall[i] = struct { + result1 *common.Envelope + result2 error + }{result1, result2} +} + +func (fake *DeliverClientRequester) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.connectMutex.RLock() + defer fake.connectMutex.RUnlock() + fake.seekInfoHeadersFromMutex.RLock() + defer fake.seekInfoHeadersFromMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DeliverClientRequester) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ blocksprovider.DeliverClientRequester = new(DeliverClientRequester) diff --git a/internal/pkg/peer/blocksprovider/fake/deliver_streamer.go b/internal/pkg/peer/blocksprovider/fake/deliver_streamer.go index e9fcfbcdbae..853b58ebf78 100644 --- a/internal/pkg/peer/blocksprovider/fake/deliver_streamer.go +++ 
b/internal/pkg/peer/blocksprovider/fake/deliver_streamer.go @@ -36,15 +36,16 @@ func (fake *DeliverStreamer) Deliver(arg1 context.Context, arg2 *grpc.ClientConn arg1 context.Context arg2 *grpc.ClientConn }{arg1, arg2}) + stub := fake.DeliverStub + fakeReturns := fake.deliverReturns fake.recordInvocation("Deliver", []interface{}{arg1, arg2}) fake.deliverMutex.Unlock() - if fake.DeliverStub != nil { - return fake.DeliverStub(arg1, arg2) + if stub != nil { + return stub(arg1, arg2) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.deliverReturns return fakeReturns.result1, fakeReturns.result2 } diff --git a/internal/pkg/peer/blocksprovider/fake/dialer.go b/internal/pkg/peer/blocksprovider/fake/dialer.go index f0053988e56..f00434b61c2 100644 --- a/internal/pkg/peer/blocksprovider/fake/dialer.go +++ b/internal/pkg/peer/blocksprovider/fake/dialer.go @@ -39,15 +39,16 @@ func (fake *Dialer) Dial(arg1 string, arg2 [][]byte) (*grpc.ClientConn, error) { arg1 string arg2 [][]byte }{arg1, arg2Copy}) + stub := fake.DialStub + fakeReturns := fake.dialReturns fake.recordInvocation("Dial", []interface{}{arg1, arg2Copy}) fake.dialMutex.Unlock() - if fake.DialStub != nil { - return fake.DialStub(arg1, arg2) + if stub != nil { + return stub(arg1, arg2) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.dialReturns return fakeReturns.result1, fakeReturns.result2 } diff --git a/internal/pkg/peer/blocksprovider/fake/duration_exceeded_handler.go b/internal/pkg/peer/blocksprovider/fake/duration_exceeded_handler.go new file mode 100644 index 00000000000..0cc04616a5c --- /dev/null +++ b/internal/pkg/peer/blocksprovider/fake/duration_exceeded_handler.go @@ -0,0 +1,102 @@ +// Code generated by counterfeiter. DO NOT EDIT. +package fake + +import ( + "sync" + + "github.com/hyperledger/fabric/internal/pkg/peer/blocksprovider" +) + +type DurationExceededHandler struct { + DurationExceededHandlerStub func() bool + durationExceededHandlerMutex sync.RWMutex + durationExceededHandlerArgsForCall []struct { + } + durationExceededHandlerReturns struct { + result1 bool + } + durationExceededHandlerReturnsOnCall map[int]struct { + result1 bool + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *DurationExceededHandler) DurationExceededHandler() bool { + fake.durationExceededHandlerMutex.Lock() + ret, specificReturn := fake.durationExceededHandlerReturnsOnCall[len(fake.durationExceededHandlerArgsForCall)] + fake.durationExceededHandlerArgsForCall = append(fake.durationExceededHandlerArgsForCall, struct { + }{}) + stub := fake.DurationExceededHandlerStub + fakeReturns := fake.durationExceededHandlerReturns + fake.recordInvocation("DurationExceededHandler", []interface{}{}) + fake.durationExceededHandlerMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *DurationExceededHandler) DurationExceededHandlerCallCount() int { + fake.durationExceededHandlerMutex.RLock() + defer fake.durationExceededHandlerMutex.RUnlock() + return len(fake.durationExceededHandlerArgsForCall) +} + +func (fake *DurationExceededHandler) DurationExceededHandlerCalls(stub func() bool) { + fake.durationExceededHandlerMutex.Lock() + defer fake.durationExceededHandlerMutex.Unlock() + fake.DurationExceededHandlerStub = stub +} + +func (fake *DurationExceededHandler) DurationExceededHandlerReturns(result1 bool) { + fake.durationExceededHandlerMutex.Lock() + defer 
fake.durationExceededHandlerMutex.Unlock() + fake.DurationExceededHandlerStub = nil + fake.durationExceededHandlerReturns = struct { + result1 bool + }{result1} +} + +func (fake *DurationExceededHandler) DurationExceededHandlerReturnsOnCall(i int, result1 bool) { + fake.durationExceededHandlerMutex.Lock() + defer fake.durationExceededHandlerMutex.Unlock() + fake.DurationExceededHandlerStub = nil + if fake.durationExceededHandlerReturnsOnCall == nil { + fake.durationExceededHandlerReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.durationExceededHandlerReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *DurationExceededHandler) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.durationExceededHandlerMutex.RLock() + defer fake.durationExceededHandlerMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *DurationExceededHandler) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ blocksprovider.DurationExceededHandler = new(DurationExceededHandler) diff --git a/internal/pkg/peer/blocksprovider/fake/gossip_service_adapter.go b/internal/pkg/peer/blocksprovider/fake/gossip_service_adapter.go index c5a63d991ed..37630f81c2f 100644 --- a/internal/pkg/peer/blocksprovider/fake/gossip_service_adapter.go +++ b/internal/pkg/peer/blocksprovider/fake/gossip_service_adapter.go @@ -37,15 +37,16 @@ func (fake *GossipServiceAdapter) AddPayload(arg1 string, arg2 *gossip.Payload) arg1 string arg2 *gossip.Payload }{arg1, arg2}) + stub := fake.AddPayloadStub + fakeReturns := fake.addPayloadReturns fake.recordInvocation("AddPayload", []interface{}{arg1, arg2}) fake.addPayloadMutex.Unlock() - if fake.AddPayloadStub != nil { - return fake.AddPayloadStub(arg1, arg2) + if stub != nil { + return stub(arg1, arg2) } if specificReturn { return ret.result1 } - fakeReturns := fake.addPayloadReturns return fakeReturns.result1 } @@ -96,9 +97,10 @@ func (fake *GossipServiceAdapter) Gossip(arg1 *gossip.GossipMessage) { fake.gossipArgsForCall = append(fake.gossipArgsForCall, struct { arg1 *gossip.GossipMessage }{arg1}) + stub := fake.GossipStub fake.recordInvocation("Gossip", []interface{}{arg1}) fake.gossipMutex.Unlock() - if fake.GossipStub != nil { + if stub != nil { fake.GossipStub(arg1) } } diff --git a/internal/pkg/peer/blocksprovider/fake/ledger_info.go b/internal/pkg/peer/blocksprovider/fake/ledger_info.go index 1ea37fdee7a..a2ffb8bdc2e 100644 --- a/internal/pkg/peer/blocksprovider/fake/ledger_info.go +++ b/internal/pkg/peer/blocksprovider/fake/ledger_info.go @@ -29,15 +29,16 @@ func (fake *LedgerInfo) LedgerHeight() (uint64, error) { ret, specificReturn := fake.ledgerHeightReturnsOnCall[len(fake.ledgerHeightArgsForCall)] fake.ledgerHeightArgsForCall = append(fake.ledgerHeightArgsForCall, struct { }{}) + stub := fake.LedgerHeightStub + fakeReturns := fake.ledgerHeightReturns fake.recordInvocation("LedgerHeight", []interface{}{}) fake.ledgerHeightMutex.Unlock() - if fake.LedgerHeightStub != nil { - return fake.LedgerHeightStub() + if stub != nil { + return stub() } if 
specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.ledgerHeightReturns return fakeReturns.result1, fakeReturns.result2 } diff --git a/internal/pkg/peer/blocksprovider/fake/orderer_connection_source.go b/internal/pkg/peer/blocksprovider/fake/orderer_connection_source.go index b0908647bd4..a8c3487a5a4 100644 --- a/internal/pkg/peer/blocksprovider/fake/orderer_connection_source.go +++ b/internal/pkg/peer/blocksprovider/fake/orderer_connection_source.go @@ -9,16 +9,6 @@ import ( ) type OrdererConnectionSource struct { - EndpointsStub func() []*orderers.Endpoint - endpointsMutex sync.RWMutex - endpointsArgsForCall []struct { - } - endpointsReturns struct { - result1 []*orderers.Endpoint - } - endpointsReturnsOnCall map[int]struct { - result1 []*orderers.Endpoint - } RandomEndpointStub func() (*orderers.Endpoint, error) randomEndpointMutex sync.RWMutex randomEndpointArgsForCall []struct { @@ -31,60 +21,18 @@ type OrdererConnectionSource struct { result1 *orderers.Endpoint result2 error } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *OrdererConnectionSource) Endpoints() []*orderers.Endpoint { - fake.endpointsMutex.Lock() - ret, specificReturn := fake.endpointsReturnsOnCall[len(fake.endpointsArgsForCall)] - fake.endpointsArgsForCall = append(fake.endpointsArgsForCall, struct { - }{}) - fake.recordInvocation("Endpoints", []interface{}{}) - fake.endpointsMutex.Unlock() - if fake.EndpointsStub != nil { - return fake.EndpointsStub() - } - if specificReturn { - return ret.result1 + ShuffledEndpointsStub func() []*orderers.Endpoint + shuffledEndpointsMutex sync.RWMutex + shuffledEndpointsArgsForCall []struct { } - fakeReturns := fake.endpointsReturns - return fakeReturns.result1 -} - -func (fake *OrdererConnectionSource) EndpointsCallCount() int { - fake.endpointsMutex.RLock() - defer fake.endpointsMutex.RUnlock() - return len(fake.endpointsArgsForCall) -} - -func (fake *OrdererConnectionSource) EndpointsCalls(stub func() []*orderers.Endpoint) { - fake.endpointsMutex.Lock() - defer fake.endpointsMutex.Unlock() - fake.EndpointsStub = stub -} - -func (fake *OrdererConnectionSource) EndpointsReturns(result1 []*orderers.Endpoint) { - fake.endpointsMutex.Lock() - defer fake.endpointsMutex.Unlock() - fake.EndpointsStub = nil - fake.endpointsReturns = struct { + shuffledEndpointsReturns struct { result1 []*orderers.Endpoint - }{result1} -} - -func (fake *OrdererConnectionSource) EndpointsReturnsOnCall(i int, result1 []*orderers.Endpoint) { - fake.endpointsMutex.Lock() - defer fake.endpointsMutex.Unlock() - fake.EndpointsStub = nil - if fake.endpointsReturnsOnCall == nil { - fake.endpointsReturnsOnCall = make(map[int]struct { - result1 []*orderers.Endpoint - }) } - fake.endpointsReturnsOnCall[i] = struct { + shuffledEndpointsReturnsOnCall map[int]struct { result1 []*orderers.Endpoint - }{result1} + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex } func (fake *OrdererConnectionSource) RandomEndpoint() (*orderers.Endpoint, error) { @@ -92,15 +40,16 @@ func (fake *OrdererConnectionSource) RandomEndpoint() (*orderers.Endpoint, error ret, specificReturn := fake.randomEndpointReturnsOnCall[len(fake.randomEndpointArgsForCall)] fake.randomEndpointArgsForCall = append(fake.randomEndpointArgsForCall, struct { }{}) + stub := fake.RandomEndpointStub + fakeReturns := fake.randomEndpointReturns fake.recordInvocation("RandomEndpoint", []interface{}{}) fake.randomEndpointMutex.Unlock() - if fake.RandomEndpointStub != nil { - return 
fake.RandomEndpointStub() + if stub != nil { + return stub() } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.randomEndpointReturns return fakeReturns.result1, fakeReturns.result2 } @@ -142,13 +91,66 @@ func (fake *OrdererConnectionSource) RandomEndpointReturnsOnCall(i int, result1 }{result1, result2} } +func (fake *OrdererConnectionSource) ShuffledEndpoints() []*orderers.Endpoint { + fake.shuffledEndpointsMutex.Lock() + ret, specificReturn := fake.shuffledEndpointsReturnsOnCall[len(fake.shuffledEndpointsArgsForCall)] + fake.shuffledEndpointsArgsForCall = append(fake.shuffledEndpointsArgsForCall, struct { + }{}) + stub := fake.ShuffledEndpointsStub + fakeReturns := fake.shuffledEndpointsReturns + fake.recordInvocation("ShuffledEndpoints", []interface{}{}) + fake.shuffledEndpointsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *OrdererConnectionSource) ShuffledEndpointsCallCount() int { + fake.shuffledEndpointsMutex.RLock() + defer fake.shuffledEndpointsMutex.RUnlock() + return len(fake.shuffledEndpointsArgsForCall) +} + +func (fake *OrdererConnectionSource) ShuffledEndpointsCalls(stub func() []*orderers.Endpoint) { + fake.shuffledEndpointsMutex.Lock() + defer fake.shuffledEndpointsMutex.Unlock() + fake.ShuffledEndpointsStub = stub +} + +func (fake *OrdererConnectionSource) ShuffledEndpointsReturns(result1 []*orderers.Endpoint) { + fake.shuffledEndpointsMutex.Lock() + defer fake.shuffledEndpointsMutex.Unlock() + fake.ShuffledEndpointsStub = nil + fake.shuffledEndpointsReturns = struct { + result1 []*orderers.Endpoint + }{result1} +} + +func (fake *OrdererConnectionSource) ShuffledEndpointsReturnsOnCall(i int, result1 []*orderers.Endpoint) { + fake.shuffledEndpointsMutex.Lock() + defer fake.shuffledEndpointsMutex.Unlock() + fake.ShuffledEndpointsStub = nil + if fake.shuffledEndpointsReturnsOnCall == nil { + fake.shuffledEndpointsReturnsOnCall = make(map[int]struct { + result1 []*orderers.Endpoint + }) + } + fake.shuffledEndpointsReturnsOnCall[i] = struct { + result1 []*orderers.Endpoint + }{result1} +} + func (fake *OrdererConnectionSource) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.endpointsMutex.RLock() - defer fake.endpointsMutex.RUnlock() fake.randomEndpointMutex.RLock() defer fake.randomEndpointMutex.RUnlock() + fake.shuffledEndpointsMutex.RLock() + defer fake.shuffledEndpointsMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/internal/pkg/peer/blocksprovider/timeout_config.go b/internal/pkg/peer/blocksprovider/timeout_config.go new file mode 100644 index 00000000000..c8d8a6d933a --- /dev/null +++ b/internal/pkg/peer/blocksprovider/timeout_config.go @@ -0,0 +1,36 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider + +import "time" + +const ( + BftMinRetryInterval = 50 * time.Millisecond + BftMaxRetryInterval = 10 * time.Second + BftBlockCensorshipTimeout = 20 * time.Second +) + +type TimeoutConfig struct { + // The initial value of the actual retry interval, which is increased on every failed retry + MinRetryInterval time.Duration + // The maximal value of the actual retry interval, which cannot increase beyond this value + MaxRetryInterval time.Duration + // The value of the bft censorship detection timeout + BlockCensorshipTimeout time.Duration +} + +func (t *TimeoutConfig) ApplyDefaults() { + if t.MinRetryInterval <= 0 { + t.MinRetryInterval = BftMinRetryInterval + } + if t.MaxRetryInterval <= 0 { + t.MaxRetryInterval = BftMaxRetryInterval + } + if t.BlockCensorshipTimeout <= 0 { + t.BlockCensorshipTimeout = BftBlockCensorshipTimeout + } +} diff --git a/internal/pkg/peer/blocksprovider/util.go b/internal/pkg/peer/blocksprovider/util.go index 7a79d01be3e..96c42d7473d 100644 --- a/internal/pkg/peer/blocksprovider/util.go +++ b/internal/pkg/peer/blocksprovider/util.go @@ -8,17 +8,7 @@ package blocksprovider import ( "math" - "math/rand" "time" - - "github.com/hyperledger/fabric/internal/pkg/peer/orderers" -) - -const ( - bftMinBackoffDelay = 10 * time.Millisecond - bftMaxBackoffDelay = 10 * time.Second - bftBlockRcvTotalBackoffDelay = 20 * time.Second - bftBlockCensorshipTimeout = 20 * time.Second ) type errRefreshEndpoint struct { @@ -29,20 +19,28 @@ func (e *errRefreshEndpoint) Error() string { return e.message } -type errStopping struct { - message string +type ErrStopping struct { + Message string } -func (e *errStopping) Error() string { - return e.message +func (e *ErrStopping) Error() string { + return e.Message } -type errFatal struct { - message string +type ErrFatal struct { + Message string } -func (e *errFatal) Error() string { - return e.message +func (e *ErrFatal) Error() string { + return e.Message +} + +type ErrCensorship struct { + Message string +} + +func (e *ErrCensorship) Error() string { + return e.Message } func backOffDuration(base float64, exponent uint, minDur, maxDur time.Duration) time.Duration { @@ -50,7 +48,7 @@ func backOffDuration(base float64, exponent uint, minDur, maxDur time.Duration) base = 1.0 } if minDur <= 0 { - minDur = bftMinBackoffDelay + minDur = BftMinRetryInterval } if maxDur < minDur { maxDur = minDur @@ -61,20 +59,27 @@ func backOffDuration(base float64, exponent uint, minDur, maxDur time.Duration) return time.Duration(fDurNano) } -func backOffSleep(backOffDur time.Duration, stopChan <-chan struct{}) { - select { - case <-time.After(backOffDur): - case <-stopChan: +// How many retries n does it take for minDur to reach maxDur, if minDur is scaled exponentially with base^i +// +// minDur * base^n > maxDur +// base^n > maxDur / minDur +// n * log(base) > log(maxDur / minDur) +// n > log(maxDur / minDur) / log(base) +func numRetries2Max(base float64, minDur, maxDur time.Duration) int { + if base <= 1.0 { + base = 1.001 } + if minDur <= 0 { + minDur = BftMinRetryInterval + } + if maxDur < minDur { + maxDur = minDur + } + + return int(math.Ceil(math.Log(float64(maxDur)/float64(minDur)) / math.Log(base))) } -// shuffle the endpoint slice -func shuffle(a []*orderers.Endpoint) []*orderers.Endpoint { - n := len(a) - returnedSlice := make([]*orderers.Endpoint, n) - indices := rand.Perm(n) - for i, idx := range indices { - returnedSlice[i] = a[idx] - } - return returnedSlice +type timeNumber 
struct { + t time.Time + n uint64 } diff --git a/internal/pkg/peer/blocksprovider/util_test.go b/internal/pkg/peer/blocksprovider/util_test.go new file mode 100644 index 00000000000..1c9526e2e4c --- /dev/null +++ b/internal/pkg/peer/blocksprovider/util_test.go @@ -0,0 +1,61 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package blocksprovider + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBackoffDuration(t *testing.T) { + dur := backOffDuration(2.0, 0, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, BftMinRetryInterval, dur) + + dur = backOffDuration(2.0, 1, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, 2*BftMinRetryInterval, dur) + + dur = backOffDuration(2.0, 2, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, 4*BftMinRetryInterval, dur) + + // large exponent -> dur=max + dur = backOffDuration(2.0, 20, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, BftMaxRetryInterval, dur) + + // very large exponent -> dur=max + dur = backOffDuration(2.0, 1000000, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, BftMaxRetryInterval, dur) + + // max < min -> max=min + dur = backOffDuration(2.0, 0, BftMinRetryInterval, BftMinRetryInterval/2) + assert.Equal(t, BftMinRetryInterval, dur) + + // min <= 0 -> min = BftMinRetryInterval + dur = backOffDuration(2.0, 0, -10*BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, BftMinRetryInterval, dur) + + // base < 1.0 -> base = 1.0 + dur = backOffDuration(0.5, 8, BftMinRetryInterval, BftMaxRetryInterval) + assert.Equal(t, BftMinRetryInterval, dur) +} + +func TestNumRetries(t *testing.T) { + // 2,4,8 + n := numRetries2Max(2.0, time.Second, 8*time.Second) + assert.Equal(t, 3, n) + + // 2,4,8,10 + n = numRetries2Max(2.0, time.Second, 10*time.Second) + assert.Equal(t, 4, n) + + n = numRetries2Max(2.0, time.Second, time.Second) + assert.Equal(t, 0, n) + + n = numRetries2Max(2.0, 2*time.Second, time.Second) + assert.Equal(t, 0, n) +} diff --git a/internal/pkg/peer/orderers/connection.go b/internal/pkg/peer/orderers/connection.go index d1f04f79238..bc129767ec3 100644 --- a/internal/pkg/peer/orderers/connection.go +++ b/internal/pkg/peer/orderers/connection.go @@ -65,6 +65,7 @@ func NewConnectionSource(logger *flogging.FabricLogger, overrides map[string]*En } } +// RandomEndpoint returns a random endpoint. func (cs *ConnectionSource) RandomEndpoint() (*Endpoint, error) { cs.mutex.RLock() defer cs.mutex.RUnlock() @@ -81,6 +82,20 @@ func (cs *ConnectionSource) Endpoints() []*Endpoint { return cs.allEndpoints } +// ShuffledEndpoints returns a shuffled array of endpoints in a new slice. +func (cs *ConnectionSource) ShuffledEndpoints() []*Endpoint { + cs.mutex.RLock() + defer cs.mutex.RUnlock() + + n := len(cs.allEndpoints) + returnedSlice := make([]*Endpoint, n) + indices := rand.Perm(n) + for i, idx := range indices { + returnedSlice[i] = cs.allEndpoints[idx] + } + return returnedSlice +} + func (cs *ConnectionSource) Update(globalAddrs []string, orgs map[string]OrdererOrg) { cs.mutex.Lock() defer cs.mutex.Unlock() diff --git a/internal/pkg/peer/orderers/connection_test.go b/internal/pkg/peer/orderers/connection_test.go index 7f9f98218eb..11f9c4cd663 100644 --- a/internal/pkg/peer/orderers/connection_test.go +++ b/internal/pkg/peer/orderers/connection_test.go @@ -10,6 +10,7 @@ import ( "bytes" "os" "sort" + "strings" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -136,6 +137,39 @@ var _ = Describe("Connection", func() { Expect(e.String()).To(Equal("")) }) + It("returns shuffled endpoints", func() { // there is a chance of failure here, but it is very small. + combinationSet := make(map[string]bool) + for i := 0; i < 10000; i++ { + shuffledEndpoints := cs.ShuffledEndpoints() + Expect(stripEndpoints(shuffledEndpoints)).To(ConsistOf( + stripEndpoints(endpoints), + )) + key := strings.Builder{} + for _, ep := range shuffledEndpoints { + key.WriteString(ep.Address) + key.WriteString(" ") + } + combinationSet[key.String()] = true + } + + Expect(len(combinationSet)).To(Equal(4 * 3 * 2 * 1)) + }) + + It("returns random endpoint", func() { // there is a chance of failure here, but it is very small. + combinationMap := make(map[string]*orderers.Endpoint) + for i := 0; i < 10000; i++ { + r, _ := cs.RandomEndpoint() + combinationMap[r.Address] = r + } + var all []*orderers.Endpoint + for _, ep := range combinationMap { + all = append(all, ep) + } + Expect(stripEndpoints(all)).To(ConsistOf( + stripEndpoints(endpoints), + )) + }) + When("an update does not modify the endpoint set", func() { BeforeEach(func() { cs.Update(nil, map[string]orderers.OrdererOrg{