From cdd5a046953b0d1ed55fc3bd1e43cf30b8d777cc Mon Sep 17 00:00:00 2001 From: Julian Castrence Date: Tue, 23 Feb 2021 12:48:35 -0500 Subject: [PATCH] Compare Snapshots Utility FAB-18425 Signed-off-by: Julian Castrence --- Makefile | 5 +- cmd/ledger/main.go | 58 ++ cmd/ledger/main_test.go | 59 ++ common/ledger/snapshot/file.go | 2 +- core/ledger/kvledger/kv_ledger.go | 4 +- core/ledger/kvledger/kv_ledger_provider.go | 8 +- core/ledger/kvledger/snapshot.go | 44 +- core/ledger/kvledger/snapshot_test.go | 24 +- .../txmgmt/privacyenabledstate/snapshot.go | 96 +-- .../privacyenabledstate/snapshot_test.go | 32 +- internal/ledger/compare.go | 414 ++++++++++++ internal/ledger/compare_test.go | 608 ++++++++++++++++++ 12 files changed, 1248 insertions(+), 106 deletions(-) create mode 100644 cmd/ledger/main.go create mode 100644 cmd/ledger/main_test.go create mode 100644 internal/ledger/compare.go create mode 100644 internal/ledger/compare_test.go diff --git a/Makefile b/Makefile index 4f802486a93..0366e14072d 100644 --- a/Makefile +++ b/Makefile @@ -28,11 +28,13 @@ # - idemixgen - builds a native idemixgen binary # - integration-test-prereqs - setup prerequisites for integration tests # - integration-test - runs the integration tests +# - ledger - builds a native fabric ledger troubleshooting binary # - license - checks go source files for Apache license header # - linter - runs all code checks # - native - ensures all native binaries are available # - orderer - builds a native fabric orderer binary # - orderer-docker[-clean] - ensures the orderer container is available[/cleaned] +# - osnadmin - builds a native fabric osnadmin binary # - peer - builds a native fabric peer binary # - peer-docker[-clean] - ensures the peer container is available[/cleaned] # - profile - runs unit tests for all packages in coverprofile mode (slow) @@ -83,13 +85,14 @@ GO_TAGS ?= RELEASE_EXES = orderer $(TOOLS_EXES) RELEASE_IMAGES = baseos ccenv orderer peer tools RELEASE_PLATFORMS = darwin-amd64 
linux-amd64 windows-amd64 -TOOLS_EXES = configtxgen configtxlator cryptogen discover idemixgen osnadmin peer +TOOLS_EXES = configtxgen configtxlator cryptogen discover idemixgen ledger osnadmin peer pkgmap.configtxgen := $(PKGNAME)/cmd/configtxgen pkgmap.configtxlator := $(PKGNAME)/cmd/configtxlator pkgmap.cryptogen := $(PKGNAME)/cmd/cryptogen pkgmap.discover := $(PKGNAME)/cmd/discover pkgmap.idemixgen := $(PKGNAME)/cmd/idemixgen +pkgmap.ledger := $(PKGNAME)/cmd/ledger pkgmap.orderer := $(PKGNAME)/cmd/orderer pkgmap.osnadmin := $(PKGNAME)/cmd/osnadmin pkgmap.peer := $(PKGNAME)/cmd/peer diff --git a/cmd/ledger/main.go b/cmd/ledger/main.go new file mode 100644 index 00000000000..500eef249cd --- /dev/null +++ b/cmd/ledger/main.go @@ -0,0 +1,58 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package main + +import ( + "fmt" + "os" + + "github.com/hyperledger/fabric/internal/ledger" + "gopkg.in/alecthomas/kingpin.v2" +) + +const ( + resultFilename = "./result.json" +) + +var ( + app = kingpin.New("ledger", "Ledger Utility Tool") + + compare = app.Command("compare", "Compare two ledgers via their snapshots.") + snapshotPath1 = compare.Arg("snapshotPath1", "File path to first ledger snapshot.").Required().String() + snapshotPath2 = compare.Arg("snapshotPath2", "File path to second ledger snapshot.").Required().String() + + troubleshoot = app.Command("troubleshoot", "Identify potentially divergent transactions.") + + args = os.Args[1:] +) + +func main() { + kingpin.Version("0.0.1") + + command, err := app.Parse(args) + if err != nil { + kingpin.Fatalf("parsing arguments: %s. Try --help", err) + return + } + + switch command { + + case compare.FullCommand(): + + count, err := ledger.Compare(*snapshotPath1, *snapshotPath2, resultFilename) + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("\nSuccessfully compared snapshots. Result saved to %s. 
Total differences found: %v\n", resultFilename, count) + + case troubleshoot.FullCommand(): + + fmt.Println("Command TBD") + + } +} diff --git a/cmd/ledger/main_test.go b/cmd/ledger/main_test.go new file mode 100644 index 00000000000..c19bb3ca60e --- /dev/null +++ b/cmd/ledger/main_test.go @@ -0,0 +1,59 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package main + +import ( + "os/exec" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" +) + +func TestArguments(t *testing.T) { + testCases := map[string]struct { + exitCode int + args []string + }{ + "ledger": { + exitCode: 0, + args: []string{}, + }, + "ledger-help": { + exitCode: 0, + args: []string{"--help"}, + }, + "compare-help": { + exitCode: 0, + args: []string{"compare", "--help"}, + }, + "compare": { + exitCode: 1, + args: []string{"compare"}, + }, + "one-snapshot": { + exitCode: 1, + args: []string{"compare, snapshotDir1"}, + }, + } + + // Build ledger binary + gt := gomega.NewWithT(t) + ledger, err := gexec.Build("github.com/hyperledger/fabric/cmd/ledger") + gt.Expect(err).NotTo(gomega.HaveOccurred()) + defer gexec.CleanupBuildArtifacts() + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + cmd := exec.Command(ledger, testCase.args...) + session, err := gexec.Start(cmd, nil, nil) + gt.Expect(err).NotTo(gomega.HaveOccurred()) + gt.Eventually(session, 5*time.Second).Should(gexec.Exit(testCase.exitCode)) + }) + } +} diff --git a/common/ledger/snapshot/file.go b/common/ledger/snapshot/file.go index 2e7514a6d21..472f6bceffc 100644 --- a/common/ledger/snapshot/file.go +++ b/common/ledger/snapshot/file.go @@ -119,7 +119,7 @@ func (c *FileWriter) Close() error { // FileReader reads from a ledger snapshot file. This is expected to be used for loading the ledger snapshot data // during bootstrapping a channel from snapshot. 
The data should be read, using the functions `DecodeXXX`, // in the same sequence in which the data was written by the functions `EncodeXXX` in the `FileCreator`. -// Note that the FileReader does not verifies the hash of stream and it is expected that the hash has been verified +// Note that the FileReader does not verify the hash of stream and it is expected that the hash has been verified // by the consumer. Later, if we decide to perform this, on-the-side, while loading the snapshot data, the FileRedear, // like the FileCreator, would take a `hasher` as an input type FileReader struct { diff --git a/core/ledger/kvledger/kv_ledger.go b/core/ledger/kvledger/kv_ledger.go index 614c82074f5..72dca29522e 100644 --- a/core/ledger/kvledger/kv_ledger.go +++ b/core/ledger/kvledger/kv_ledger.go @@ -47,7 +47,7 @@ var ( // This implementation provides a key-value based data model type kvLedger struct { ledgerID string - bootSnapshotMetadata *snapshotMetadata + bootSnapshotMetadata *SnapshotMetadata blockStore *blkstorage.BlockStore pvtdataStore *pvtdatastorage.Store txmgr *txmgr.LockBasedTxMgr @@ -72,7 +72,7 @@ type kvLedger struct { type lgrInitializer struct { ledgerID string initializingFromSnapshot bool - bootSnapshotMetadata *snapshotMetadata + bootSnapshotMetadata *SnapshotMetadata blockStore *blkstorage.BlockStore pvtdataStore *pvtdatastorage.Store stateDB *privacyenabledstate.DB diff --git a/core/ledger/kvledger/kv_ledger_provider.go b/core/ledger/kvledger/kv_ledger_provider.go index a007232d46d..0b796d2e2e5 100644 --- a/core/ledger/kvledger/kv_ledger_provider.go +++ b/core/ledger/kvledger/kv_ledger_provider.go @@ -330,7 +330,7 @@ func (p *Provider) Open(ledgerID string) (ledger.PeerLedger, error) { return p.open(ledgerID, bootSnapshotMetadata, false) } -func (p *Provider) open(ledgerID string, bootSnapshotMetadata *snapshotMetadata, initializingFromSnapshot bool) (ledger.PeerLedger, error) { +func (p *Provider) open(ledgerID string, bootSnapshotMetadata 
*SnapshotMetadata, initializingFromSnapshot bool) (ledger.PeerLedger, error) { // Get the block store for a chain/ledger blockStore, err := p.blkStoreProvider.Open(ledgerID) if err != nil { @@ -479,17 +479,17 @@ func (p *Provider) runCleanup(ledgerID string) error { return p.idStore.deleteLedgerID(ledgerID) } -func snapshotMetadataFromProto(p *msgs.BootSnapshotMetadata) (*snapshotMetadata, error) { +func snapshotMetadataFromProto(p *msgs.BootSnapshotMetadata) (*SnapshotMetadata, error) { if p == nil { return nil, nil } - m := &snapshotMetadataJSONs{ + m := &SnapshotMetadataJSONs{ signableMetadata: p.SingableMetadata, additionalMetadata: p.AdditionalMetadata, } - return m.toMetadata() + return m.ToMetadata() } ////////////////////////////////////////////////////////////////////// diff --git a/core/ledger/kvledger/snapshot.go b/core/ledger/kvledger/snapshot.go index e2d9e53acfc..23145ddcb00 100644 --- a/core/ledger/kvledger/snapshot.go +++ b/core/ledger/kvledger/snapshot.go @@ -32,16 +32,16 @@ import ( ) const ( - snapshotSignableMetadataFileName = "_snapshot_signable_metadata.json" + SnapshotSignableMetadataFileName = "_snapshot_signable_metadata.json" snapshotAdditionalMetadataFileName = "_snapshot_additional_metadata.json" jsonFileIndent = " " simpleKeyValueDB = "SimpleKeyValueDB" ) -// snapshotSignableMetadata is used to build a JSON that represents a unique snapshot and +// SnapshotSignableMetadata is used to build a JSON that represents a unique snapshot and // can be signed by the peer. Hashsum of the resultant JSON is intended to be used as a single // hash of the snapshot, if need be. 
-type snapshotSignableMetadata struct { +type SnapshotSignableMetadata struct { ChannelName string `json:"channel_name"` LastBlockNumber uint64 `json:"last_block_number"` LastBlockHashInHex string `json:"last_block_hash"` @@ -50,7 +50,7 @@ type snapshotSignableMetadata struct { StateDBType string `json:"state_db_type"` } -func (m *snapshotSignableMetadata) toJSON() ([]byte, error) { +func (m *SnapshotSignableMetadata) ToJSON() ([]byte, error) { return json.MarshalIndent(m, "", jsonFileIndent) } @@ -59,22 +59,22 @@ type snapshotAdditionalMetadata struct { LastBlockCommitHashInHex string `json:"last_block_commit_hash"` } -func (m *snapshotAdditionalMetadata) toJSON() ([]byte, error) { +func (m *snapshotAdditionalMetadata) ToJSON() ([]byte, error) { return json.MarshalIndent(m, "", jsonFileIndent) } -type snapshotMetadata struct { - *snapshotSignableMetadata +type SnapshotMetadata struct { + *SnapshotSignableMetadata *snapshotAdditionalMetadata } -type snapshotMetadataJSONs struct { +type SnapshotMetadataJSONs struct { signableMetadata string additionalMetadata string } -func (j *snapshotMetadataJSONs) toMetadata() (*snapshotMetadata, error) { - metadata := &snapshotSignableMetadata{} +func (j *SnapshotMetadataJSONs) ToMetadata() (*SnapshotMetadata, error) { + metadata := &SnapshotSignableMetadata{} if err := json.Unmarshal([]byte(j.signableMetadata), metadata); err != nil { return nil, errors.Wrap(err, "error while unmarshalling signable metadata") } @@ -83,8 +83,8 @@ func (j *snapshotMetadataJSONs) toMetadata() (*snapshotMetadata, error) { if err := json.Unmarshal([]byte(j.additionalMetadata), additionalMetadata); err != nil { return nil, errors.Wrap(err, "error while unmarshalling additional metadata") } - return &snapshotMetadata{ - snapshotSignableMetadata: metadata, + return &SnapshotMetadata{ + SnapshotSignableMetadata: metadata, snapshotAdditionalMetadata: additionalMetadata, }, nil } @@ -179,7 +179,7 @@ func (l *kvLedger) generateSnapshotMetadataFiles( if 
stateDBType != ledger.CouchDB { stateDBType = simpleKeyValueDB } - signableMetadata := &snapshotSignableMetadata{ + signableMetadata := &SnapshotSignableMetadata{ ChannelName: l.ledgerID, LastBlockNumber: bcInfo.Height - 1, LastBlockHashInHex: hex.EncodeToString(bcInfo.CurrentBlockHash), @@ -188,11 +188,11 @@ func (l *kvLedger) generateSnapshotMetadataFiles( StateDBType: stateDBType, } - signableMetadataBytes, err := signableMetadata.toJSON() + signableMetadataBytes, err := signableMetadata.ToJSON() if err != nil { return errors.Wrap(err, "error while marshelling snapshot metadata to JSON") } - if err := fileutil.CreateAndSyncFile(filepath.Join(dir, snapshotSignableMetadataFileName), signableMetadataBytes, 0o444); err != nil { + if err := fileutil.CreateAndSyncFile(filepath.Join(dir, SnapshotSignableMetadataFileName), signableMetadataBytes, 0o444); err != nil { return err } @@ -210,7 +210,7 @@ func (l *kvLedger) generateSnapshotMetadataFiles( LastBlockCommitHashInHex: hex.EncodeToString(l.commitHash), } - additionalMetadataBytes, err := additionalMetadata.toJSON() + additionalMetadataBytes, err := additionalMetadata.ToJSON() if err != nil { return errors.Wrap(err, "error while marshalling snapshot additional metadata to JSON") } @@ -226,7 +226,7 @@ func (p *Provider) CreateFromSnapshot(snapshotDir string) (ledger.PeerLedger, st return nil, "", errors.WithMessagef(err, "error while loading metadata") } - metadata, err := metadataJSONs.toMetadata() + metadata, err := metadataJSONs.ToMetadata() if err != nil { return nil, "", errors.WithMessagef(err, "error while unmarshalling metadata") } @@ -349,8 +349,8 @@ func (p *Provider) CreateFromSnapshot(snapshotDir string) (ledger.PeerLedger, st return lgr, ledgerID, nil } -func loadSnapshotMetadataJSONs(snapshotDir string) (*snapshotMetadataJSONs, error) { - signableMetadataFilePath := filepath.Join(snapshotDir, snapshotSignableMetadataFileName) +func loadSnapshotMetadataJSONs(snapshotDir string) (*SnapshotMetadataJSONs, 
error) { + signableMetadataFilePath := filepath.Join(snapshotDir, SnapshotSignableMetadataFileName) signableMetadataBytes, err := ioutil.ReadFile(signableMetadataFilePath) if err != nil { return nil, err @@ -360,16 +360,16 @@ func loadSnapshotMetadataJSONs(snapshotDir string) (*snapshotMetadataJSONs, erro if err != nil { return nil, err } - return &snapshotMetadataJSONs{ + return &SnapshotMetadataJSONs{ signableMetadata: string(signableMetadataBytes), additionalMetadata: string(additionalMetadataBytes), }, nil } -func verifySnapshot(snapshotDir string, snapshotMetadata *snapshotMetadata, hashProvider ledger.HashProvider) error { +func verifySnapshot(snapshotDir string, snapshotMetadata *SnapshotMetadata, hashProvider ledger.HashProvider) error { if err := verifyFileHash( snapshotDir, - snapshotSignableMetadataFileName, + SnapshotSignableMetadataFileName, snapshotMetadata.SnapshotHashInHex, hashProvider, ); err != nil { diff --git a/core/ledger/kvledger/snapshot_test.go b/core/ledger/kvledger/snapshot_test.go index 0b8717905c1..ed17e2c89c9 100644 --- a/core/ledger/kvledger/snapshot_test.go +++ b/core/ledger/kvledger/snapshot_test.go @@ -621,7 +621,7 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st var snapshotDirForTest string var cleanup func() - var metadata *snapshotMetadata + var metadata *SnapshotMetadata var signableMetadataFile string var additionalMetadataFile string @@ -641,10 +641,10 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st metadataJSONs, err := loadSnapshotMetadataJSONs(snapshotDirForTest) require.NoError(t, err) - metadata, err = metadataJSONs.toMetadata() + metadata, err = metadataJSONs.ToMetadata() require.NoError(t, err) - signableMetadataFile = filepath.Join(snapshotDirForTest, snapshotSignableMetadataFileName) + signableMetadataFile = filepath.Join(snapshotDirForTest, SnapshotSignableMetadataFileName) additionalMetadataFile = filepath.Join(snapshotDirForTest, 
snapshotAdditionalMetadataFileName) provider = testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{}) @@ -655,12 +655,12 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st } overwriteModifiedSignableMetadata := func() { - signaleMetadataJSON, err := metadata.snapshotSignableMetadata.toJSON() + signaleMetadataJSON, err := metadata.SnapshotSignableMetadata.ToJSON() require.NoError(t, err) require.NoError(t, ioutil.WriteFile(signableMetadataFile, signaleMetadataJSON, 0o600)) metadata.snapshotAdditionalMetadata.SnapshotHashInHex = computeHashForTest(t, provider, signaleMetadataJSON) - additionalMetadataJSON, err := metadata.snapshotAdditionalMetadata.toJSON() + additionalMetadataJSON, err := metadata.snapshotAdditionalMetadata.ToJSON() require.NoError(t, err) require.NoError(t, ioutil.WriteFile(additionalMetadataFile, additionalMetadataJSON, 0o600)) } @@ -668,7 +668,7 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st overwriteDataFile := func(fileName string, content []byte) { filePath := filepath.Join(snapshotDirForTest, fileName) require.NoError(t, ioutil.WriteFile(filePath, content, 0o600)) - metadata.snapshotSignableMetadata.FilesAndHashes[fileName] = computeHashForTest(t, provider, content) + metadata.SnapshotSignableMetadata.FilesAndHashes[fileName] = computeHashForTest(t, provider, content) overwriteModifiedSignableMetadata() } @@ -676,7 +676,7 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st init(t) defer cleanup() - require.NoError(t, os.Remove(filepath.Join(snapshotDirForTest, snapshotSignableMetadataFileName))) + require.NoError(t, os.Remove(filepath.Join(snapshotDirForTest, SnapshotSignableMetadataFileName))) _, _, err := provider.CreateFromSnapshot(snapshotDirForTest) require.EqualError(t, err, @@ -773,7 +773,7 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st init(t) defer cleanup() - 
metadata.snapshotSignableMetadata.LastBlockHashInHex = "invalid-hex" + metadata.SnapshotSignableMetadata.LastBlockHashInHex = "invalid-hex" overwriteModifiedSignableMetadata() _, _, err := provider.CreateFromSnapshot(snapshotDirForTest) @@ -785,7 +785,7 @@ func testCreateLedgerFromSnapshotErrorPaths(t *testing.T, originalSnapshotDir st init(t) defer cleanup() - metadata.snapshotSignableMetadata.PreviousBlockHashInHex = "invalid-hex" + metadata.SnapshotSignableMetadata.PreviousBlockHashInHex = "invalid-hex" overwriteModifiedSignableMetadata() _, _, err := provider.CreateFromSnapshot(snapshotDirForTest) @@ -886,8 +886,8 @@ func verifySnapshotOutput( } // verify the contents of the file snapshot_metadata.json - m := &snapshotSignableMetadata{} - mJSON, err := ioutil.ReadFile(filepath.Join(snapshotDir, snapshotSignableMetadataFileName)) + m := &SnapshotSignableMetadata{} + mJSON, err := ioutil.ReadFile(filepath.Join(snapshotDir, SnapshotSignableMetadataFileName)) require.NoError(t, err) require.NoError(t, json.Unmarshal(mJSON, m)) @@ -896,7 +896,7 @@ func verifySnapshotOutput( previousBlockHashHex = hex.EncodeToString(o.previousBlockHash) } require.Equal(t, - &snapshotSignableMetadata{ + &SnapshotSignableMetadata{ ChannelName: o.ledgerID, LastBlockNumber: o.lastBlockNumber, LastBlockHashInHex: hex.EncodeToString(o.lastBlockHash), diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot.go index 74169e3456e..85d23fce770 100644 --- a/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot.go +++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot.go @@ -21,10 +21,10 @@ import ( const ( snapshotFileFormat = byte(1) - pubStateDataFileName = "public_state.data" - pubStateMetadataFileName = "public_state.metadata" - pvtStateHashesFileName = "private_state_hashes.data" - pvtStateHashesMetadataFileName = "private_state_hashes.metadata" + PubStateDataFileName = "public_state.data" + 
PubStateMetadataFileName = "public_state.metadata" + PvtStateHashesFileName = "private_state_hashes.data" + PvtStateHashesMetadataFileName = "private_state_hashes.metadata" ) // ExportPubStateAndPvtStateHashes generates four files in the specified dir. The files, public_state.data and public_state.metadata @@ -38,8 +38,8 @@ func (s *DB) ExportPubStateAndPvtStateHashes(dir string, newHashFunc snapshot.Ne } defer itr.Close() - var pubStateWriter *snapshotWriter - var pvtStateHashesWriter *snapshotWriter + var pubStateWriter *SnapshotWriter + var pvtStateHashesWriter *SnapshotWriter for { kv, err := itr.Next() if err != nil { @@ -68,34 +68,34 @@ func (s *DB) ExportPubStateAndPvtStateHashes(dir string, newHashFunc snapshot.Ne } if pvtStateHashesWriter == nil { // encountered first time the pvt state hash element - pvtStateHashesWriter, err = newSnapshotWriter( + pvtStateHashesWriter, err = NewSnapshotWriter( dir, - pvtStateHashesFileName, - pvtStateHashesMetadataFileName, + PvtStateHashesFileName, + PvtStateHashesMetadataFileName, newHashFunc, ) if err != nil { return nil, err } - defer pvtStateHashesWriter.close() + defer pvtStateHashesWriter.Close() } - if err := pvtStateHashesWriter.addData(namespace, snapshotRecord); err != nil { + if err := pvtStateHashesWriter.AddData(namespace, snapshotRecord); err != nil { return nil, err } default: if pubStateWriter == nil { // encountered first time the pub state element - pubStateWriter, err = newSnapshotWriter( + pubStateWriter, err = NewSnapshotWriter( dir, - pubStateDataFileName, - pubStateMetadataFileName, + PubStateDataFileName, + PubStateMetadataFileName, newHashFunc, ) if err != nil { return nil, err } - defer pubStateWriter.close() + defer pubStateWriter.Close() } - if err := pubStateWriter.addData(namespace, snapshotRecord); err != nil { + if err := pubStateWriter.AddData(namespace, snapshotRecord); err != nil { return nil, err } } @@ -104,38 +104,39 @@ func (s *DB) ExportPubStateAndPvtStateHashes(dir string, 
newHashFunc snapshot.Ne snapshotFilesInfo := map[string][]byte{} if pubStateWriter != nil { - pubStateDataHash, pubStateMetadataHash, err := pubStateWriter.done() + pubStateDataHash, pubStateMetadataHash, err := pubStateWriter.Done() if err != nil { return nil, err } - snapshotFilesInfo[pubStateDataFileName] = pubStateDataHash - snapshotFilesInfo[pubStateMetadataFileName] = pubStateMetadataHash + snapshotFilesInfo[PubStateDataFileName] = pubStateDataHash + snapshotFilesInfo[PubStateMetadataFileName] = pubStateMetadataHash } if pvtStateHashesWriter != nil { - pvtStateHahshesDataHash, pvtStateHashesMetadataHash, err := pvtStateHashesWriter.done() + pvtStateHahshesDataHash, pvtStateHashesMetadataHash, err := pvtStateHashesWriter.Done() if err != nil { return nil, err } - snapshotFilesInfo[pvtStateHashesFileName] = pvtStateHahshesDataHash - snapshotFilesInfo[pvtStateHashesMetadataFileName] = pvtStateHashesMetadataHash + snapshotFilesInfo[PvtStateHashesFileName] = pvtStateHahshesDataHash + snapshotFilesInfo[PvtStateHashesMetadataFileName] = pvtStateHashesMetadataHash } return snapshotFilesInfo, nil } -// snapshotWriter generates two files, a data file and a metadata file. The datafile contains a series of tuples -// and the metadata file contains a series of tuples -type snapshotWriter struct { +// SnapshotWriter generates two files, a data file and a metadata file. 
The datafile contains a series of tuples +// and the metadata file contains a series of tuples +type SnapshotWriter struct { dataFile *snapshot.FileWriter metadataFile *snapshot.FileWriter metadata []*metadataRow } -func newSnapshotWriter( +// NewSnapshotWriter creates a new SnapshotWriter +func NewSnapshotWriter( dir, dataFileName, metadataFileName string, newHash func() (hash.Hash, error), -) (*snapshotWriter, error) { +) (*SnapshotWriter, error) { dataFilePath := filepath.Join(dir, dataFileName) metadataFilePath := filepath.Join(dir, metadataFileName) @@ -157,14 +158,14 @@ func newSnapshotWriter( if err != nil { return nil, err } - return &snapshotWriter{ + return &SnapshotWriter{ dataFile: dataFile, metadataFile: metadataFile, }, nil } -func (w *snapshotWriter) addData(namespace string, snapshotRecord *SnapshotRecord) error { +func (w *SnapshotWriter) AddData(namespace string, snapshotRecord *SnapshotRecord) error { if len(w.metadata) == 0 || w.metadata[len(w.metadata)-1].namespace != namespace { // new namespace begins w.metadata = append(w.metadata, @@ -180,7 +181,7 @@ func (w *snapshotWriter) addData(namespace string, snapshotRecord *SnapshotRecor return w.dataFile.EncodeProtoMessage(snapshotRecord) } -func (w *snapshotWriter) done() ([]byte, []byte, error) { +func (w *SnapshotWriter) Done() ([]byte, []byte, error) { dataHash, err := w.dataFile.Done() if err != nil { return nil, nil, err @@ -210,7 +211,7 @@ func writeMetadata(metadata []*metadataRow, metadataFile *snapshot.FileWriter) e return nil } -func (w *snapshotWriter) close() { +func (w *SnapshotWriter) Close() { if w == nil { return } @@ -261,12 +262,12 @@ func (p *DBProvider) ImportFromSnapshot( return nil } -// worldStateSnapshotReader encapsulates the two snapshotReaders - one for the public state and another for the +// worldStateSnapshotReader encapsulates the two SnapshotReaders - one for the public state and another for the // pvtstate hashes. 
worldStateSnapshotReader also implements the interface statedb.FullScanIterator. In the Next() // function, it returns the public state data and then the pvtstate hashes type worldStateSnapshotReader struct { - pubState *snapshotReader - pvtStateHashes *snapshotReader + pubState *SnapshotReader + pvtStateHashes *SnapshotReader pvtdataHashesConsumers []SnapshotPvtdataHashesConsumer encodeKeyHashesWithBase64 bool @@ -278,19 +279,19 @@ func newWorldStateSnapshotReader( pvtdataHashesConsumers []SnapshotPvtdataHashesConsumer, encodeKeyHashesWithBase64 bool, ) (*worldStateSnapshotReader, error) { - var pubState *snapshotReader - var pvtStateHashes *snapshotReader + var pubState *SnapshotReader + var pvtStateHashes *SnapshotReader var err error - pubState, err = newSnapshotReader( - dir, pubStateDataFileName, pubStateMetadataFileName, + pubState, err = NewSnapshotReader( + dir, PubStateDataFileName, PubStateMetadataFileName, ) if err != nil { return nil, err } - pvtStateHashes, err = newSnapshotReader( - dir, pvtStateHashesFileName, pvtStateHashesMetadataFileName, + pvtStateHashes, err = NewSnapshotReader( + dir, PvtStateHashesFileName, PvtStateHashesMetadataFileName, ) if err != nil { if pubState != nil { @@ -431,14 +432,13 @@ func (r *worldStateSnapshotReader) Close() { r.pvtStateHashes.Close() } -// snapshotReader reads data from a pair of files (a data file and the corresponding metadata file) -type snapshotReader struct { +// SnapshotReader reads data from a pair of files (a data file and the corresponding metadata file) +type SnapshotReader struct { dataFile *snapshot.FileReader cursor *cursor } -// If the passed in file name does not exist a nil response is returned -func newSnapshotReader(dir, dataFileName, metadataFileName string) (*snapshotReader, error) { +func NewSnapshotReader(dir, dataFileName, metadataFileName string) (*SnapshotReader, error) { dataFilePath := filepath.Join(dir, dataFileName) metadataFilePath := filepath.Join(dir, metadataFileName) exist, 
_, err := fileutil.FileExists(dataFilePath) @@ -470,7 +470,7 @@ func newSnapshotReader(dir, dataFileName, metadataFileName string) (*snapshotRea if err != nil { return nil, err } - return &snapshotReader{ + return &SnapshotReader{ dataFile: dataFile, cursor: &cursor{ metadata: metadata, @@ -501,7 +501,7 @@ func readMetadata(metadataFile *snapshot.FileReader) ([]*metadataRow, error) { return metadata, nil } -func (r *snapshotReader) Next() (string, *SnapshotRecord, error) { +func (r *SnapshotReader) Next() (string, *SnapshotRecord, error) { if !r.cursor.move() { return "", nil, nil } @@ -513,14 +513,14 @@ func (r *snapshotReader) Next() (string, *SnapshotRecord, error) { return r.cursor.currentNamespace(), snapshotRecord, nil } -func (r *snapshotReader) Close() { +func (r *SnapshotReader) Close() { if r == nil { return } r.dataFile.Close() } -func (r *snapshotReader) hasMore() bool { +func (r *SnapshotReader) hasMore() bool { return r.cursor.canMove() } diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot_test.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot_test.go index 20a7e0285b9..a61a81372b4 100644 --- a/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot_test.go +++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot_test.go @@ -209,14 +209,14 @@ func verifyExportedSnapshot( numFilesExpected := 0 if publicStateFilesExpected { numFilesExpected += 2 - require.Contains(t, filesAndHashes, pubStateDataFileName) - require.Contains(t, filesAndHashes, pubStateMetadataFileName) + require.Contains(t, filesAndHashes, PubStateDataFileName) + require.Contains(t, filesAndHashes, PubStateMetadataFileName) } if pvtdataHashesFilesExpected { numFilesExpected += 2 - require.Contains(t, filesAndHashes, pvtStateHashesFileName) - require.Contains(t, filesAndHashes, pvtStateHashesMetadataFileName) + require.Contains(t, filesAndHashes, PvtStateHashesFileName) + require.Contains(t, filesAndHashes, PvtStateHashesMetadataFileName) } for f, h := 
range filesAndHashes { @@ -337,19 +337,19 @@ func TestSnapshotReaderNextFunction(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(testdir) - w, err := newSnapshotWriter(testdir, "datafile", "metadatafile", testNewHashFunc) + w, err := NewSnapshotWriter(testdir, "datafile", "metadatafile", testNewHashFunc) require.NoError(t, err) snapshotRecord := &SnapshotRecord{ Key: []byte("key"), Value: []byte("value"), } - require.NoError(t, w.addData("ns", snapshotRecord)) - _, _, err = w.done() + require.NoError(t, w.AddData("ns", snapshotRecord)) + _, _, err = w.Done() require.NoError(t, err) - w.close() + w.Close() - r, err := newSnapshotReader(testdir, "datafile", "metadatafile") + r, err := NewSnapshotReader(testdir, "datafile", "metadatafile") require.NoError(t, err) require.NotNil(t, r) defer r.Close() @@ -401,7 +401,7 @@ func TestLoadMetadata(t *testing.T) { kvCounts: uint64(i), }) } - metadataFilePath := filepath.Join(testdir, pubStateMetadataFileName) + metadataFilePath := filepath.Join(testdir, PubStateMetadataFileName) metadataFileWriter, err := snapshot.CreateFile(metadataFilePath, snapshotFileFormat, testNewHashFunc) require.NoError(t, err) @@ -445,7 +445,7 @@ func TestSnapshotExportErrorPropagation(t *testing.T) { init() defer cleanup() - pubStateDataFilePath := filepath.Join(snapshotDir, pubStateDataFileName) + pubStateDataFilePath := filepath.Join(snapshotDir, PubStateDataFileName) _, err = os.Create(pubStateDataFilePath) require.NoError(t, err) _, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc) @@ -456,7 +456,7 @@ func TestSnapshotExportErrorPropagation(t *testing.T) { init() defer cleanup() - pubStateMetadataFilePath := filepath.Join(snapshotDir, pubStateMetadataFileName) + pubStateMetadataFilePath := filepath.Join(snapshotDir, PubStateMetadataFileName) _, err = os.Create(pubStateMetadataFilePath) require.NoError(t, err) _, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc) @@ -467,7 +467,7 @@ func 
TestSnapshotExportErrorPropagation(t *testing.T) { init() defer cleanup() - pvtStateHashesDataFilePath := filepath.Join(snapshotDir, pvtStateHashesFileName) + pvtStateHashesDataFilePath := filepath.Join(snapshotDir, PvtStateHashesFileName) _, err = os.Create(pvtStateHashesDataFilePath) require.NoError(t, err) _, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc) @@ -478,7 +478,7 @@ func TestSnapshotExportErrorPropagation(t *testing.T) { init() defer cleanup() - pvtStateHashesMetadataFilePath := filepath.Join(snapshotDir, pvtStateHashesMetadataFileName) + pvtStateHashesMetadataFilePath := filepath.Join(snapshotDir, PvtStateHashesMetadataFileName) _, err = os.Create(pvtStateHashesMetadataFilePath) require.NoError(t, err) _, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc) @@ -520,7 +520,7 @@ func TestSnapshotImportErrorPropagation(t *testing.T) { } // errors related to data files - for _, f := range []string{pubStateDataFileName, pvtStateHashesFileName} { + for _, f := range []string{PubStateDataFileName, PvtStateHashesFileName} { t.Run("error_while_checking_the_presence_of_"+f, func(t *testing.T) { init() defer cleanup() @@ -599,7 +599,7 @@ func TestSnapshotImportErrorPropagation(t *testing.T) { } // errors related to metadata files - for _, f := range []string{pubStateMetadataFileName, pvtStateHashesMetadataFileName} { + for _, f := range []string{PubStateMetadataFileName, PvtStateHashesMetadataFileName} { t.Run("error_while_reading_data_format_from_metadata_file:"+f, func(t *testing.T) { init() defer cleanup() diff --git a/internal/ledger/compare.go b/internal/ledger/compare.go new file mode 100644 index 00000000000..a6d9e822683 --- /dev/null +++ b/internal/ledger/compare.go @@ -0,0 +1,414 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package ledger + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/common/ledger/util" + "github.com/hyperledger/fabric/core/ledger/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate" + "github.com/hyperledger/fabric/internal/fileutil" + "github.com/pkg/errors" +) + +// Compare - Compares two ledger snapshots and outputs the differences in snapshot records +// This function will overwrite the file at outputFile if it already exists +func Compare(snapshotDir1 string, snapshotDir2 string, outputFile string) (count int, err error) { + // Check the hashes between two files + hashPath1 := filepath.Join(snapshotDir1, kvledger.SnapshotSignableMetadataFileName) + hashPath2 := filepath.Join(snapshotDir2, kvledger.SnapshotSignableMetadataFileName) + + equal, err := snapshotsComparable(hashPath1, hashPath2) + if err != nil { + return 0, err + } + + if equal { + return 0, errors.New("both snapshots public state hashes are same. 
Aborting compare") + } + + // Create the output file + jsonOutputFile, err := newJSONFileWriter(outputFile) + if err != nil { + return 0, err + } + + // Create snapshot readers to read both snapshots + snapshotReader1, err := privacyenabledstate.NewSnapshotReader(snapshotDir1, + privacyenabledstate.PubStateDataFileName, privacyenabledstate.PubStateMetadataFileName) + if err != nil { + return 0, err + } + snapshotReader2, err := privacyenabledstate.NewSnapshotReader(snapshotDir2, + privacyenabledstate.PubStateDataFileName, privacyenabledstate.PubStateMetadataFileName) + if err != nil { + return 0, err + } + + // Read each snapshot record to begin looking for divergences + namespace1, snapshotRecord1, err := snapshotReader1.Next() + if err != nil { + return 0, err + } + namespace2, snapshotRecord2, err := snapshotReader2.Next() + if err != nil { + return 0, err + } + + // Main snapshot record comparison loop + for snapshotRecord1 != nil && snapshotRecord2 != nil { + + // nsKeys used for comparing snapshot records + key1 := &nsKey{namespace: namespace1, key: snapshotRecord1.Key} + key2 := &nsKey{namespace: namespace2, key: snapshotRecord2.Key} + + // Determine the difference in records by comparing nsKeys + switch nsKeyCompare(key1, key2) { + + case 0: // Keys are the same, look for a difference in records + if !(proto.Equal(snapshotRecord1, snapshotRecord2)) { + // Keys are the same but records are different + diffRecord, err := newDiffRecord(namespace1, snapshotRecord1, snapshotRecord2) + if err != nil { + return 0, err + } + // Add difference to output JSON file + err = jsonOutputFile.addRecord(*diffRecord) + if err != nil { + return 0, err + } + } + // Advance both snapshot readers + namespace1, snapshotRecord1, err = snapshotReader1.Next() + if err != nil { + return 0, err + } + namespace2, snapshotRecord2, err = snapshotReader2.Next() + if err != nil { + return 0, err + } + + case 1: // Key 1 is bigger, snapshot 1 is missing a record + // Snapshot 2 has the 
missing record, add missing to output JSON file + diffRecord, err := newDiffRecord(namespace2, nil, snapshotRecord2) + if err != nil { + return 0, err + } + // Add missing record to output JSON file + err = jsonOutputFile.addRecord(*diffRecord) + if err != nil { + return 0, err + } + // Advance the second snapshot reader + namespace2, snapshotRecord2, err = snapshotReader2.Next() + if err != nil { + return 0, err + } + + case -1: // Key 2 is bigger, snapshot 2 is missing a record + // Snapshot 1 has the missing record, add missing to output JSON file + diffRecord, err := newDiffRecord(namespace1, snapshotRecord1, nil) + if err != nil { + return 0, err + } + // Add missing record to output JSON file + err = jsonOutputFile.addRecord(*diffRecord) + if err != nil { + return 0, err + } + // Advance the first snapshot reader + namespace1, snapshotRecord1, err = snapshotReader1.Next() + if err != nil { + return 0, err + } + + default: + panic("unexpected code path: bug") + } + } + + // Check for tailing records + switch { + + case snapshotRecord1 != nil: // Snapshot 2 is missing a record + for snapshotRecord1 != nil { + // Add missing to output JSON file + diffRecord, err := newDiffRecord(namespace1, snapshotRecord1, nil) + if err != nil { + return 0, err + } + err = jsonOutputFile.addRecord(*diffRecord) + if err != nil { + return 0, err + } + namespace1, snapshotRecord1, err = snapshotReader1.Next() + if err != nil { + return 0, err + } + } + + case snapshotRecord2 != nil: // Snapshot 1 is missing a record + for snapshotRecord2 != nil { + // Add missing to output JSON file + diffRecord, err := newDiffRecord(namespace2, nil, snapshotRecord2) + if err != nil { + return 0, err + } + err = jsonOutputFile.addRecord(*diffRecord) + if err != nil { + return 0, err + } + namespace2, snapshotRecord2, err = snapshotReader2.Next() + if err != nil { + return 0, err + } + } + } + + err = jsonOutputFile.close() + if err != nil { + return 0, err + } + return jsonOutputFile.count, nil +} 
+ +// diffRecord represents a diverging record in json +type diffRecord struct { + Namespace string `json:"namespace,omitempty"` + Key string `json:"key,omitempty"` + Record1 *snapshotRecord `json:"snapshotrecord1,omitempty"` + Record2 *snapshotRecord `json:"snapshotrecord2,omitempty"` +} + +// Creates a new diffRecord +func newDiffRecord(namespace string, record1 *privacyenabledstate.SnapshotRecord, + record2 *privacyenabledstate.SnapshotRecord) (*diffRecord, error) { + // Empty snapshot + s0 := snapshotRecord{ + Value: "", + BlockNum: 0, + TxNum: 0, + } + + var s1, s2 *snapshotRecord = &s0, &s0 // snapshot records + var k string // key + var err error + + // Snapshot2 has a missing record + if record1 != nil { + k = string(record1.Key) + s1, err = newSnapshotRecord(record1) + if err != nil { + return nil, err + } + } + // Snapshot1 has a missing record + if record2 != nil { + k = string(record2.Key) + s2, err = newSnapshotRecord(record2) + if err != nil { + return nil, err + } + } + + return &diffRecord{ + Namespace: namespace, + Key: k, + Record1: s1, + Record2: s2, + }, nil +} + +// snapshotRecord represents the data of a snapshot record in json +type snapshotRecord struct { + Value string `json:"value,omitempty"` + BlockNum uint64 `json:"blockNum,omitempty"` + TxNum uint64 `json:"txNum,omitempty"` +} + +// Creates a new SnapshotRecord +func newSnapshotRecord(record *privacyenabledstate.SnapshotRecord) (*snapshotRecord, error) { + blockNum, txNum, err := heightFromBytes(record.Version) + if err != nil { + return nil, err + } + + return &snapshotRecord{ + Value: string(record.Value), + BlockNum: blockNum, + TxNum: txNum, + }, nil +} + +// Obtain the block height and transaction height of a snapshot from its version bytes +func heightFromBytes(b []byte) (uint64, uint64, error) { + blockNum, n1, err := util.DecodeOrderPreservingVarUint64(b) + if err != nil { + return 0, 0, err + } + txNum, _, err := util.DecodeOrderPreservingVarUint64(b[n1:]) + if err != nil { + 
return 0, 0, err + } + + return blockNum, txNum, nil +} + +// nsKey is used to compare between snapshot records using both the namespace and key +type nsKey struct { + namespace string + key []byte +} + +// Compares two nsKeys +// Returns: +// -1 if k1 < k2 +// 1 if k1 > k2 +// 0 if k1 == k2 +func nsKeyCompare(k1, k2 *nsKey) int { + res := strings.Compare(k1.namespace, k2.namespace) + if res != 0 { + return res + } + return bytes.Compare(k1.key, k2.key) +} + +// Compares hashes of snapshots +func snapshotsComparable(fpath1 string, fpath2 string) (bool, error) { + var mdata1, mdata2 kvledger.SnapshotSignableMetadata + + // Open the first file + f, err := ioutil.ReadFile(fpath1) + if err != nil { + return false, err + } + + err = json.Unmarshal([]byte(f), &mdata1) + if err != nil { + return false, err + } + + // Open the second file + f, err = ioutil.ReadFile(fpath2) + if err != nil { + return false, err + } + + err = json.Unmarshal([]byte(f), &mdata2) + if err != nil { + return false, err + } + + if mdata1.ChannelName != mdata2.ChannelName { + return false, errors.Errorf("the supplied snapshots appear to be non-comparable. Channel names do not match."+ + "\nSnapshot1 channel name: %s\nSnapshot2 channel name: %s", mdata1.ChannelName, mdata2.ChannelName) + } + + if mdata1.LastBlockNumber != mdata2.LastBlockNumber { + return false, errors.Errorf("the supplied snapshots appear to be non-comparable. Last block numbers do not match."+ + "\nSnapshot1 last block number: %v\nSnapshot2 last block number: %v", mdata1.LastBlockNumber, mdata2.LastBlockNumber) + } + + if mdata1.LastBlockHashInHex != mdata2.LastBlockHashInHex { + return false, errors.Errorf("the supplied snapshots appear to be non-comparable. 
Last block hashes do not match."+ + "\nSnapshot1 last block hash: %s\nSnapshot2 last block hash: %s", mdata1.LastBlockHashInHex, mdata2.LastBlockHashInHex) + } + + if mdata1.StateDBType != mdata2.StateDBType { + return false, errors.Errorf("the supplied snapshots appear to be non-comparable. State db types do not match."+ + "\nSnapshot1 state db type: %s\nSnapshot2 state db type: %s", mdata1.StateDBType, mdata2.StateDBType) + } + + pubDataHash1 := mdata1.FilesAndHashes[privacyenabledstate.PubStateDataFileName] + pubMdataHash1 := mdata1.FilesAndHashes[privacyenabledstate.PubStateMetadataFileName] + + pubDataHash2 := mdata2.FilesAndHashes[privacyenabledstate.PubStateDataFileName] + pubMdataHash2 := mdata2.FilesAndHashes[privacyenabledstate.PubStateMetadataFileName] + + return (pubDataHash1 == pubDataHash2 && pubMdataHash1 == pubMdataHash2), nil +} + +// jsonArrayFileWriter writes a list of diffRecords to a json file +type jsonArrayFileWriter struct { + file *os.File + buffer *bufio.Writer + encoder *json.Encoder + firstRecordWritten bool + count int +} + +func newJSONFileWriter(filePath string) (*jsonArrayFileWriter, error) { + f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE, 0o644) + if err != nil { + return nil, err + } + + b := bufio.NewWriter(f) + // Opening bracket for beginning of diffRecord list + _, err = b.Write([]byte("[\n")) + if err != nil { + return nil, err + } + + return &jsonArrayFileWriter{ + file: f, + buffer: b, + encoder: json.NewEncoder(b), + }, nil +} + +func (w *jsonArrayFileWriter) addRecord(r interface{}) error { + // Add commas for records after the first in the list + if w.firstRecordWritten { + _, err := w.buffer.Write([]byte(",\n")) + if err != nil { + return err + } + } else { + w.firstRecordWritten = true + } + + err := w.encoder.Encode(r) + if err != nil { + return err + } + w.count++ + + return nil +} + +func (w *jsonArrayFileWriter) close() error { + _, err := w.buffer.Write([]byte("]\n")) + if err != nil { + return err + } + 
+ err = w.buffer.Flush() + if err != nil { + return err + } + + err = w.file.Sync() + if err != nil { + return err + } + + err = fileutil.SyncParentDir(w.file.Name()) + if err != nil { + return err + } + + return w.file.Close() +} diff --git a/internal/ledger/compare_test.go b/internal/ledger/compare_test.go new file mode 100644 index 00000000000..55696102e89 --- /dev/null +++ b/internal/ledger/compare_test.go @@ -0,0 +1,608 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package ledger + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/hyperledger/fabric/common/ledger/util" + "github.com/hyperledger/fabric/core/ledger/kvledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate" + "github.com/hyperledger/fabric/internal/fileutil" + "github.com/stretchr/testify/require" +) + +var testNewHashFunc = func() (hash.Hash, error) { + return sha256.New(), nil +} + +type testRecord struct { + namespace string + key string + value string + blockNum uint64 + txNum uint64 + metadata string +} + +func TestCompare(t *testing.T) { + // Each list of testRecords represents the records of a single snapshot + sampleRecords1 := []*testRecord{ + { + namespace: "ns1", key: "k1", value: "v1", + blockNum: 1, txNum: 1, metadata: "md1", + }, + { + namespace: "ns1", key: "k2", value: "v2", + blockNum: 1, txNum: 1, metadata: "md2", + }, + { + namespace: "ns2", key: "k1", value: "v3", + blockNum: 1, txNum: 2, metadata: "md3", + }, + { + namespace: "ns3", key: "k1", value: "v4", + blockNum: 2, txNum: 1, metadata: "md4", + }, + } + + sampleRecords2 := []*testRecord{ + { + namespace: "ns1", key: "k1", value: "v1", + blockNum: 1, txNum: 1, metadata: "md1", + }, + { + namespace: "ns1", key: "k2", value: "v2", + blockNum: 1, txNum: 1, metadata: "md2", + }, + { + namespace: "ns2", key: "k1", value: "v4", + blockNum: 1, txNum: 2, metadata: "md3", + }, + 
{ + namespace: "ns3", key: "k1", value: "v4", + blockNum: 2, txNum: 1, metadata: "md4", + }, + } + + sampleRecords3 := []*testRecord{ + { + namespace: "ns1", key: "k1", value: "v1", + blockNum: 1, txNum: 1, metadata: "md1", + }, + { + namespace: "ns2", key: "k1", value: "v3", + blockNum: 1, txNum: 2, metadata: "md3", + }, + { + namespace: "ns3", key: "k1", value: "v4", + blockNum: 2, txNum: 1, metadata: "md4", + }, + } + + sampleRecords4 := []*testRecord{ + { + namespace: "ns1", key: "k1", value: "v1", + blockNum: 1, txNum: 1, metadata: "md1", + }, + { + namespace: "ns1", key: "k2", value: "v2", + blockNum: 1, txNum: 1, metadata: "md2", + }, + } + + // Signable metadata samples for snapshots + sampleSignableMetadata1 := &kvledger.SnapshotSignableMetadata{ + ChannelName: "testchannel", + LastBlockNumber: sampleRecords1[len(sampleRecords1)-1].blockNum, + LastBlockHashInHex: "last_block_hash", + PreviousBlockHashInHex: "previous_block_hash", + FilesAndHashes: map[string]string{ + "private_state_hashes.data": "private_state_hash1", + "private_state_hashes.metadata": "private_state_hash1", + "public_state.data": "public_state_hash1", + "public_state.metadata": "public_state_hash1", + "txids.data": "txids_hash1", + "txids.metadata": "txids_hash1", + }, + StateDBType: "testdatabase", + } + + sampleSignableMetadata2 := &kvledger.SnapshotSignableMetadata{ + ChannelName: "testchannel", + LastBlockNumber: sampleRecords1[len(sampleRecords1)-1].blockNum, + LastBlockHashInHex: "last_block_hash", + PreviousBlockHashInHex: "previous_block_hash", + FilesAndHashes: map[string]string{ + "private_state_hashes.data": "private_state_hash2", + "private_state_hashes.metadata": "private_state_hash2", + "public_state.data": "public_state_hash2", + "public_state.metadata": "public_state_hash2", + "txids.data": "txids_hash2", + "txids.metadata": "txids_hash2", + }, + StateDBType: "testdatabase", + } + + sampleSignableMetadata3 := &kvledger.SnapshotSignableMetadata{ + ChannelName: 
"testchannel", + LastBlockNumber: sampleRecords1[len(sampleRecords1)-1].blockNum, + LastBlockHashInHex: "last_block_hash", + PreviousBlockHashInHex: "previous_block_hash", + FilesAndHashes: map[string]string{ + "private_state_hashes.data": "private_state_hash2", + "private_state_hashes.metadata": "private_state_hash2", + "public_state.data": "public_state_hash2", + "public_state.metadata": "public_state_hash2", + "txids.data": "txids_hash2", + "txids.metadata": "txids_hash2", + }, + StateDBType: "testdatabase2", + } + + // Expected outputs + expectedDifferenceResult := `[ + { + "namespace" : "ns2", + "key" : "k1", + "snapshotrecord1" : { + "value" : "v3", + "blockNum" : 1, + "txNum" : 2 + }, + "snapshotrecord2" : { + "value" : "v4", + "blockNum" : 1, + "txNum" : 2 + } + } + ]` + expectedMissingResult1 := `[ + { + "namespace" : "ns1", + "key" : "k2", + "snapshotrecord1" : { + "value" : "v2", + "blockNum" : 1, + "txNum" : 1 + }, + "snapshotrecord2" : {} + } + ]` + expectedMissingResult2 := `[ + { + "namespace" : "ns1", + "key" : "k2", + "snapshotrecord1" : {}, + "snapshotrecord2" : { + "value" : "v2", + "blockNum" : 1, + "txNum" : 1 + } + } + ]` + expectedMissingTailResult1 := `[ + { + "namespace" : "ns2", + "key" : "k1", + "snapshotrecord1" : { + "value" : "v3", + "blockNum" : 1, + "txNum" : 2 + }, + "snapshotrecord2" : {} + }, + { + "namespace" : "ns3", + "key" : "k1", + "snapshotrecord1" : { + "value" : "v4", + "blockNum" : 2, + "txNum" : 1 + }, + "snapshotrecord2" : {} + } + ]` + expectedMissingTailResult2 := `[ + { + "namespace" : "ns2", + "key" : "k1", + "snapshotrecord1" : {}, + "snapshotrecord2" : { + "value" : "v3", + "blockNum" : 1, + "txNum" : 2 + } + }, + { + "namespace" : "ns3", + "key" : "k1", + "snapshotrecord1" : {}, + "snapshotrecord2" : { + "value" : "v4", + "blockNum" : 2, + "txNum" : 1 + } + } + ]` + expectedSamePubStateError := "both snapshots public state hashes are same. 
Aborting compare" + expectedDiffDatabaseError := "the supplied snapshots appear to be non-comparable. State db types do not match." + + "\nSnapshot1 state db type: testdatabase\nSnapshot2 state db type: testdatabase2" + + testCases := map[string]struct { + inputTestRecords1 []*testRecord + inputSignableMetadata1 *kvledger.SnapshotSignableMetadata + inputTestRecords2 []*testRecord + inputSignableMetadata2 *kvledger.SnapshotSignableMetadata + expectedOutput string + expectedOutputType string + expectedDiffCount int + }{ + // Snapshots have a single difference in record + "single-difference": { + inputTestRecords1: sampleRecords1, + inputSignableMetadata1: sampleSignableMetadata1, + inputTestRecords2: sampleRecords2, + inputSignableMetadata2: sampleSignableMetadata2, + expectedOutput: expectedDifferenceResult, + expectedOutputType: "json", + expectedDiffCount: 1, + }, + // Second snapshot is missing a record + "second-missing": { + inputTestRecords1: sampleRecords1, + inputSignableMetadata1: sampleSignableMetadata1, + inputTestRecords2: sampleRecords3, + inputSignableMetadata2: sampleSignableMetadata2, + expectedOutput: expectedMissingResult1, + expectedOutputType: "json", + expectedDiffCount: 1, + }, + // First snapshot is missing a record + "first-missing": { + inputTestRecords1: sampleRecords3, + inputSignableMetadata1: sampleSignableMetadata2, + inputTestRecords2: sampleRecords1, + inputSignableMetadata2: sampleSignableMetadata1, + expectedOutput: expectedMissingResult2, + expectedOutputType: "json", + expectedDiffCount: 1, + }, + // Second snapshot is missing tailing records + "second-missing-tail": { + inputTestRecords1: sampleRecords1, + inputSignableMetadata1: sampleSignableMetadata1, + inputTestRecords2: sampleRecords4, + inputSignableMetadata2: sampleSignableMetadata2, + expectedOutput: expectedMissingTailResult1, + expectedOutputType: "json", + expectedDiffCount: 2, + }, + // First snapshot is missing tailing records + "first-missing-tail": { + 
inputTestRecords1: sampleRecords4, + inputSignableMetadata1: sampleSignableMetadata2, + inputTestRecords2: sampleRecords1, + inputSignableMetadata2: sampleSignableMetadata1, + expectedOutput: expectedMissingTailResult2, + expectedOutputType: "json", + expectedDiffCount: 2, + }, + // Snapshots contain the same public state hashes + "same-hash": { + inputTestRecords1: sampleRecords1, + inputSignableMetadata1: sampleSignableMetadata1, + inputTestRecords2: sampleRecords1, + inputSignableMetadata2: sampleSignableMetadata1, + expectedOutput: expectedSamePubStateError, + expectedOutputType: "error", + expectedDiffCount: 0, + }, + // Snapshots contain different metadata (different databases in this case) that makes them non-comparable + "different-database": { + inputTestRecords1: sampleRecords1, + inputSignableMetadata1: sampleSignableMetadata1, + inputTestRecords2: sampleRecords2, + inputSignableMetadata2: sampleSignableMetadata3, + expectedOutput: expectedDiffDatabaseError, + expectedOutputType: "error", + expectedDiffCount: 0, + }, + } + + // Run test cases individually + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + // Create temporary directories for the sample snapshots and comparison results + snapshotDir1, err := ioutil.TempDir("", "sample-snapshot-dir1") + require.NoError(t, err) + defer os.RemoveAll(snapshotDir1) + snapshotDir2, err := ioutil.TempDir("", "sample-snapshot-dir2") + require.NoError(t, err) + defer os.RemoveAll(snapshotDir2) + resultsDir, err := ioutil.TempDir("", "results") + require.NoError(t, err) + defer os.RemoveAll(resultsDir) + + // Populate temporary directories with sample snapshot data + err = createSnapshot(snapshotDir1, testCase.inputTestRecords1, testCase.inputSignableMetadata1) + require.NoError(t, err) + err = createSnapshot(snapshotDir2, testCase.inputTestRecords2, testCase.inputSignableMetadata2) + require.NoError(t, err) + + // Compare snapshots and check the output + count, out, err := 
compareSnapshots(snapshotDir1, snapshotDir2, filepath.Join(resultsDir, "results.json")) + require.Equal(t, testCase.expectedDiffCount, count) + switch testCase.expectedOutputType { + case "error": + require.ErrorContains(t, err, testCase.expectedOutput) + case "json": + require.NoError(t, err) + require.JSONEq(t, testCase.expectedOutput, out) + default: + panic("unexpected code path: bug") + } + }) + } +} + +// createSnapshot generates a sample snapshot based on the passed in records and metadata +func createSnapshot(dir string, pubStateRecords []*testRecord, signableMetadata *kvledger.SnapshotSignableMetadata) error { + // Generate public state of sample snapshot + pubStateWriter, err := privacyenabledstate.NewSnapshotWriter( + dir, + privacyenabledstate.PubStateDataFileName, + privacyenabledstate.PubStateMetadataFileName, + testNewHashFunc, + ) + if err != nil { + return err + } + defer pubStateWriter.Close() + + // Add sample records to sample snapshot + for _, sample := range pubStateRecords { + err = pubStateWriter.AddData(sample.namespace, &privacyenabledstate.SnapshotRecord{ + Key: []byte(sample.key), + Value: []byte(sample.value), + Metadata: []byte(sample.metadata), + Version: toBytes(sample.blockNum, sample.txNum), + }) + if err != nil { + return err + } + } + + _, _, err = pubStateWriter.Done() + if err != nil { + return err + } + + // Generate the signable metadata files for sample snapshot + signableMetadataBytes, err := signableMetadata.ToJSON() + if err != nil { + return err + } + // Populate temporary directory with signable metadata files + err = fileutil.CreateAndSyncFile(filepath.Join(dir, kvledger.SnapshotSignableMetadataFileName), signableMetadataBytes, 0o444) + if err != nil { + return err + } + + return nil +} + +// compareSnapshots calls the Compare tool and extracts the result json +func compareSnapshots(ss1 string, ss2 string, res string) (int, string, error) { + // Run compare tool on snapshots + count, err := Compare(ss1, ss2, res) + if 
err != nil { + return 0, "", err + } + // Read results of output + resBytes, err := ioutil.ReadFile(res) + if err != nil { + return 0, "", err + } + out, err := ioutil.ReadAll(bytes.NewReader(resBytes)) + if err != nil { + return 0, "", err + } + + return count, string(out), nil +} + +// toBytes serializes the Height +func toBytes(blockNum uint64, txNum uint64) []byte { + blockNumBytes := util.EncodeOrderPreservingVarUint64(blockNum) + txNumBytes := util.EncodeOrderPreservingVarUint64(txNum) + return append(blockNumBytes, txNumBytes...) +} + +func TestJSONArrayFileWriter(t *testing.T) { + sampleDiffRecords := []diffRecord{ + { + Namespace: "abc", + Key: "key-52", + Record1: &snapshotRecord{ + Value: "red", + BlockNum: 254, + TxNum: 21, + }, + Record2: &snapshotRecord{ + Value: "blue", + BlockNum: 254, + TxNum: 21, + }, + }, + { + Namespace: "abc", + Key: "key-73", + Record1: &snapshotRecord{ + Value: "green", + BlockNum: 472, + TxNum: 61, + }, + Record2: &snapshotRecord{ + Value: "", + BlockNum: 0, + TxNum: 0, + }, + }, + { + Namespace: "xyz", + Key: "key-44", + Record1: &snapshotRecord{ + Value: "", + BlockNum: 0, + TxNum: 0, + }, + Record2: &snapshotRecord{ + Value: "purple", + BlockNum: 566, + TxNum: 3, + }, + }, + } + + expectedResult := `[ + { + "namespace" : "abc", + "key" : "key-52", + "snapshotrecord1" : { + "value" : "red", + "blockNum" : 254, + "txNum" : 21 + }, + "snapshotrecord2" : { + "value" : "blue", + "blockNum" : 254, + "txNum" : 21 + } + }, + { + "namespace" : "abc", + "key" : "key-73", + "snapshotrecord1" : { + "value" : "green", + "blockNum" : 472, + "txNum" : 61 + }, + "snapshotrecord2" : {} + }, + { + "namespace" : "xyz", + "key" : "key-44", + "snapshotrecord1" : {}, + "snapshotrecord2" : { + "value" : "purple", + "blockNum" : 566, + "txNum" : 3 + } + } + ]` + + // Create temporary directory for output + resultDir, err := ioutil.TempDir("", "result") + require.NoError(t, err) + defer os.RemoveAll(resultDir) + // Create the output file + 
jsonResultFile, err := newJSONFileWriter(filepath.Join(resultDir, "result.json")) + require.NoError(t, err) + // Write each sample diffRecord to the output file and close + for _, diffRecord := range sampleDiffRecords { + err = jsonResultFile.addRecord(diffRecord) + require.NoError(t, err) + } + err = jsonResultFile.close() + require.NoError(t, err) + + // Read results of output and compare + resBytes, err := ioutil.ReadFile(filepath.Join(resultDir, "result.json")) + require.NoError(t, err) + res, err := ioutil.ReadAll(bytes.NewReader(resBytes)) + require.NoError(t, err) + + require.JSONEq(t, expectedResult, string(res)) +} + +func TestNSKeyCompare(t *testing.T) { + testCases := []struct { + nsKey1 nsKey + nsKey2 nsKey + expected int + }{ + { + nsKey1: nsKey{ + namespace: "xyz", + key: []byte("key-1"), + }, + nsKey2: nsKey{ + namespace: "abc", + key: []byte("key-2"), + }, + expected: 1, + }, + { + nsKey1: nsKey{ + namespace: "abc", + key: []byte("key-1"), + }, + nsKey2: nsKey{ + namespace: "xyz", + key: []byte("key-2"), + }, + expected: -1, + }, + { + nsKey1: nsKey{ + namespace: "abc", + key: []byte("key-1"), + }, + nsKey2: nsKey{ + namespace: "abc", + key: []byte("key-2"), + }, + expected: -1, + }, + { + nsKey1: nsKey{ + namespace: "abc", + key: []byte("key-2"), + }, + nsKey2: nsKey{ + namespace: "abc", + key: []byte("key-1"), + }, + expected: 1, + }, + { + nsKey1: nsKey{ + namespace: "abc", + key: []byte("key-1"), + }, + nsKey2: nsKey{ + namespace: "abc", + key: []byte("key-1"), + }, + expected: 0, + }, + } + + for i, testCase := range testCases { + t.Run(fmt.Sprint(i+1), func(t *testing.T) { + require.Equal(t, nsKeyCompare(&testCase.nsKey1, &testCase.nsKey2), testCase.expected) + }) + } +}