diff --git a/core/chaincode/handler_test.go b/core/chaincode/handler_test.go index 0359d616253..879e049c2de 100644 --- a/core/chaincode/handler_test.go +++ b/core/chaincode/handler_test.go @@ -2550,7 +2550,7 @@ var _ = Describe("Handler", func() { }) It("sends an execute message to the chaincode with the correct proposal", func() { - expectedMessage := *incomingMessage + expectedMessage := proto.Clone(incomingMessage).(*pb.ChaincodeMessage) expectedMessage.Proposal = expectedSignedProp close(responseNotifier) @@ -2559,7 +2559,7 @@ var _ = Describe("Handler", func() { Eventually(fakeChatStream.SendCallCount).Should(Equal(1)) Consistently(fakeChatStream.SendCallCount).Should(Equal(1)) msg := fakeChatStream.SendArgsForCall(0) - Expect(msg).To(Equal(&expectedMessage)) + Expect(msg).To(Equal(expectedMessage)) Expect(msg.Proposal).To(Equal(expectedSignedProp)) }) diff --git a/core/common/privdata/simplecollection.go b/core/common/privdata/simplecollection.go index 27eb37fd8b9..6081c3dcb8c 100644 --- a/core/common/privdata/simplecollection.go +++ b/core/common/privdata/simplecollection.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package privdata import ( + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/peer" "github.com/hyperledger/fabric/common/policies" "github.com/hyperledger/fabric/msp" @@ -20,7 +21,7 @@ type SimpleCollection struct { name string accessPolicy policies.Policy memberOrgs map[string]struct{} - conf peer.StaticCollectionConfig + conf *peer.StaticCollectionConfig } type SimpleCollectionPersistenceConfigs struct { @@ -86,7 +87,7 @@ func (sc *SimpleCollection) Setup(collectionConfig *peer.StaticCollectionConfig, if collectionConfig == nil { return errors.New("Nil config passed to collection setup") } - sc.conf = *collectionConfig + sc.conf = proto.Clone(collectionConfig).(*peer.StaticCollectionConfig) sc.name = collectionConfig.GetName() // get the access signature policy envelope diff --git a/core/endorser/endorser_test.go b/core/endorser/endorser_test.go index f280640be5d..bcc45842d96 100644 --- a/core/endorser/endorser_test.go +++ b/core/endorser/endorser_test.go @@ -9,7 +9,8 @@ package endorser_test import ( "context" "fmt" - "sort" + "slices" + "strings" "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-lib-go/common/metrics/metricsfakes" @@ -29,32 +30,22 @@ import ( "github.com/pkg/errors" ) -type CcInterest pb.ChaincodeInterest - -func (a CcInterest) Len() int { return len(a.Chaincodes) } -func (a CcInterest) Swap(i, j int) { - a.Chaincodes[i], a.Chaincodes[j] = a.Chaincodes[j], a.Chaincodes[i] -} - -func (a CcInterest) Less(i, j int) bool { - ai := a.Chaincodes[i] - aj := a.Chaincodes[j] - - if ai.Name != aj.Name { - return ai.Name < aj.Name +func sortChaincodeCall(a, b *pb.ChaincodeCall) int { + if a.Name != b.Name { + return strings.Compare(a.Name, b.Name) } - if len(ai.CollectionNames) != len(aj.CollectionNames) { - return len(ai.CollectionNames) < len(aj.CollectionNames) + if len(a.CollectionNames) != len(b.CollectionNames) { + return len(a.CollectionNames) - len(b.CollectionNames) } - for ii := range ai.CollectionNames { - if ai.CollectionNames[ii] != aj.CollectionNames[ii] { - return ai.CollectionNames[ii] < aj.CollectionNames[ii] + for ii := range a.CollectionNames { + if a.CollectionNames[ii] != b.CollectionNames[ii] { + return strings.Compare(a.CollectionNames[ii], b.CollectionNames[ii]) } } - return false + return 0 } var _ = Describe("Endorser", func() { @@ -1132,7 +1123,7 @@ var _ = Describe("Endorser", 
func() { proposalResponse, err := e.ProcessProposal(context.TODO(), signedProposal) Expect(err).NotTo(HaveOccurred()) - sort.Sort(CcInterest(*proposalResponse.Interest)) + slices.SortFunc(proposalResponse.Interest.Chaincodes, sortChaincodeCall) Expect(proposalResponse.Interest).To(Equal(&pb.ChaincodeInterest{ Chaincodes: []*pb.ChaincodeCall{{ Name: "myCC", @@ -1169,7 +1160,7 @@ var _ = Describe("Endorser", func() { proposalResponse, err := e.ProcessProposal(context.TODO(), signedProposal) Expect(err).NotTo(HaveOccurred()) - sort.Sort(CcInterest(*proposalResponse.Interest)) + slices.SortFunc(proposalResponse.Interest.Chaincodes, sortChaincodeCall) Expect(proposalResponse.Interest).To(Equal(&pb.ChaincodeInterest{ Chaincodes: []*pb.ChaincodeCall{ { @@ -1209,7 +1200,7 @@ var _ = Describe("Endorser", func() { proposalResponse, err := e.ProcessProposal(context.TODO(), signedProposal) Expect(err).NotTo(HaveOccurred()) - sort.Sort(CcInterest(*proposalResponse.Interest)) + slices.SortFunc(proposalResponse.Interest.Chaincodes, sortChaincodeCall) Expect(proposalResponse.Interest).To(Equal(&pb.ChaincodeInterest{ Chaincodes: []*pb.ChaincodeCall{{ Name: "myCC", @@ -1248,7 +1239,7 @@ var _ = Describe("Endorser", func() { proposalResponse, err := e.ProcessProposal(context.TODO(), signedProposal) Expect(err).NotTo(HaveOccurred()) - sort.Sort(CcInterest(*proposalResponse.Interest)) + slices.SortFunc(proposalResponse.Interest.Chaincodes, sortChaincodeCall) Expect(proto.Equal( proposalResponse.Interest, &pb.ChaincodeInterest{ @@ -1293,7 +1284,7 @@ var _ = Describe("Endorser", func() { proposalResponse, err := e.ProcessProposal(context.TODO(), signedProposal) Expect(err).NotTo(HaveOccurred()) - sort.Sort(CcInterest(*proposalResponse.Interest)) + slices.SortFunc(proposalResponse.Interest.Chaincodes, sortChaincodeCall) Expect(proto.Equal( proposalResponse.Interest, &pb.ChaincodeInterest{ diff --git a/core/peer/deliverevents.go b/core/peer/deliverevents.go index 7b8364f04ae..0a540a57bcf 100644 --- a/core/peer/deliverevents.go +++ b/core/peer/deliverevents.go @@ -114,8 +114,7 @@ func (fbrs *filteredBlockResponseSender) SendBlockResponse( signedData *protoutil.SignedData, ) error { // Generates filtered block response - b := blockEvent(*block) - filteredBlock, err := b.toFilteredBlock() + filteredBlock, err := toFilteredBlock(block) if err != nil { logger.Warningf("Failed to generate filtered block due to: %s", err) return fbrs.SendStatusResponse(common.Status_BAD_REQUEST) @@ -231,10 +230,6 @@ func (bprs *blockAndPrivateDataResponseSender) getPrivateData( // transactionActions aliasing for peer.TransactionAction pointers slice type transactionActions []*peer.TransactionAction -// blockEvent an alias for common.Block structure, used to -// extend with auxiliary functionality -type blockEvent common.Block - // DeliverFiltered sends a stream of blocks to a client after commitment func (s *DeliverServer) DeliverFiltered(srv peer.Deliver_DeliverFilteredServer) error { logger.Debugf("Starting new DeliverFiltered handler") @@ -289,7 +284,7 @@ func (s *DeliverServer) DeliverWithPrivateData(srv peer.Deliver_DeliverWithPriva return err } -func (block *blockEvent) toFilteredBlock() (*peer.FilteredBlock, error) { +func toFilteredBlock(block *common.Block) (*peer.FilteredBlock, error) { filteredBlock := &peer.FilteredBlock{ Number: block.Header.Number, } diff --git a/core/transientstore/persistance.pb.go b/core/transientstore/persistence.pb.go similarity index 97% rename from core/transientstore/persistance.pb.go rename to 
core/transientstore/persistence.pb.go index 91a43a30e49..68234d69550 100644 --- a/core/transientstore/persistance.pb.go +++ b/core/transientstore/persistence.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: persistance.proto +// source: persistence.proto package transientstore @@ -64,7 +64,7 @@ func init() { proto.RegisterType((*PendingDeleteStorageList)(nil), "transientstore.PendingDeleteStorageList") } -func init() { proto.RegisterFile("persistance.proto", fileDescriptor_a374ca4de69122a4) } +func init() { proto.RegisterFile("persistence.proto", fileDescriptor_a374ca4de69122a4) } var fileDescriptor_a374ca4de69122a4 = []byte{ // 145 bytes of a gzipped FileDescriptorProto diff --git a/core/transientstore/persistance.proto b/core/transientstore/persistence.proto similarity index 100% rename from core/transientstore/persistance.proto rename to core/transientstore/persistence.proto diff --git a/discovery/client/client.go b/discovery/client/client.go index 100b13c37b9..03bd1c08c13 100644 --- a/discovery/client/client.go +++ b/discovery/client/client.go @@ -157,9 +157,9 @@ func (req *Request) addQueryMapping(queryType protoext.QueryType, key string) { // Send sends the request and returns the response, or error on failure func (c *Client) Send(ctx context.Context, req *Request, auth *discovery.AuthInfo) (Response, error) { - reqToBeSent := *req.Request + reqToBeSent := proto.Clone(req.Request).(*discovery.Request) reqToBeSent.Authentication = auth - payload, err := proto.Marshal(&reqToBeSent) + payload, err := proto.Marshal(reqToBeSent) if err != nil { return nil, errors.Wrap(err, "failed marshaling Request to bytes") } diff --git a/discovery/cmd/config_test.go b/discovery/cmd/config_test.go index 5aa7f32b7d5..c08198b3512 100644 --- a/discovery/cmd/config_test.go +++ b/discovery/cmd/config_test.go @@ -11,7 +11,7 @@ import ( "fmt" "testing" - . 
"github.com/hyperledger/fabric-protos-go/discovery" + discprotos "github.com/hyperledger/fabric-protos-go/discovery" "github.com/hyperledger/fabric-protos-go/msp" "github.com/hyperledger/fabric/cmd/common" discovery "github.com/hyperledger/fabric/discovery/cmd" @@ -79,13 +79,13 @@ func TestParseConfigResponse(t *testing.T) { }) t.Run("Success", func(t *testing.T) { - chanRes.On("Config").Return(&ConfigResult{ + chanRes.On("Config").Return(&discprotos.ConfigResult{ Msps: map[string]*msp.FabricMSPConfig{ "Org1MSP": nil, "Org2MSP": nil, }, - Orderers: map[string]*Endpoints{ - "OrdererMSP": {Endpoint: []*Endpoint{ + Orderers: map[string]*discprotos.Endpoints{ + "OrdererMSP": {Endpoint: []*discprotos.Endpoint{ {Host: "orderer1", Port: 7050}, }}, }, diff --git a/discovery/service_test.go b/discovery/service_test.go index 1740186c321..df5caeeff89 100644 --- a/discovery/service_test.go +++ b/discovery/service_test.go @@ -593,7 +593,7 @@ type peers []*discovery.Peer func (ps peers) exists(p *discovery.Peer) error { var found bool for _, q := range ps { - if reflect.DeepEqual(*p, *q) { + if proto.Equal(p, q) { found = true break } diff --git a/discovery/test/integration_test.go b/discovery/test/integration_test.go index 235499195d0..d65785cb615 100644 --- a/discovery/test/integration_test.go +++ b/discovery/test/integration_test.go @@ -22,15 +22,14 @@ import ( "testing" "time" - discovery_protos "github.com/hyperledger/fabric-protos-go/discovery" - "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-lib-go/bccsp/sw" bccsp "github.com/hyperledger/fabric-lib-go/bccsp/utils" "github.com/hyperledger/fabric-protos-go/common" + discprotos "github.com/hyperledger/fabric-protos-go/discovery" "github.com/hyperledger/fabric-protos-go/gossip" msprotos "github.com/hyperledger/fabric-protos-go/msp" - . 
"github.com/hyperledger/fabric-protos-go/peer" + "github.com/hyperledger/fabric-protos-go/peer" "github.com/hyperledger/fabric/common/cauthdsl" "github.com/hyperledger/fabric/common/configtx" "github.com/hyperledger/fabric/common/crypto/tlsgen" @@ -128,21 +127,21 @@ func TestGreenPath(t *testing.T) { service.lsccMetadataManager.query.On("GetState", "lscc", "cc2").Return(cc2Bytes, nil) service.lsccMetadataManager.query.On("GetState", "lscc", "cc2~collection").Return(collectionConfigBytes, nil) - ccWithCollection := &ChaincodeInterest{ - Chaincodes: []*ChaincodeCall{ + ccWithCollection := &peer.ChaincodeInterest{ + Chaincodes: []*peer.ChaincodeCall{ {Name: "cc2", CollectionNames: []string{"col12"}}, }, } - cc2cc := &ChaincodeInterest{ - Chaincodes: []*ChaincodeCall{ + cc2cc := &peer.ChaincodeInterest{ + Chaincodes: []*peer.ChaincodeCall{ {Name: "cc1"}, {Name: "cc2"}, }, } // Send all queries req := disc.NewRequest().AddLocalPeersQuery().OfChannel("mychannel") - col1 := &ChaincodeCall{Name: "cc2", CollectionNames: []string{"col1"}} - nonExistentCollection := &ChaincodeCall{Name: "cc2", CollectionNames: []string{"col3"}} + col1 := &peer.ChaincodeCall{Name: "cc2", CollectionNames: []string{"col1"}} + nonExistentCollection := &peer.ChaincodeCall{Name: "cc2", CollectionNames: []string{"col3"}} _ = nonExistentCollection req, err := req.AddPeersQuery().AddPeersQuery(col1).AddPeersQuery(nonExistentCollection).AddConfigQuery().AddEndorsersQuery(cc2cc, ccWithCollection) @@ -241,8 +240,8 @@ func TestEndorsementComputationFailure(t *testing.T) { // Now test a collection query that should fail because cc2's endorsement policy is Org1MSP AND org2MSP // but the collection is configured only to have peers from Org1MSP - ccWithCollection := &ChaincodeInterest{ - Chaincodes: []*ChaincodeCall{ + ccWithCollection := &peer.ChaincodeInterest{ + Chaincodes: []*peer.ChaincodeCall{ {Name: "cc2", CollectionNames: []string{"col1"}}, }, } @@ -266,8 +265,8 @@ func TestLedgerFailure(t *testing.T) { service.lsccMetadataManager.query.On("GetState", "lscc", "cc2").Return(nil, errors.New("IO error")) service.lsccMetadataManager.query.On("GetState", "lscc", "cc12~collection").Return(collectionConfigBytes, nil) - ccWithCollection := &ChaincodeInterest{ - Chaincodes: []*ChaincodeCall{ + ccWithCollection := &peer.ChaincodeInterest{ + Chaincodes: []*peer.ChaincodeCall{ {Name: "cc1"}, {Name: "cc2", CollectionNames: []string{"col1"}}, }, @@ -331,7 +330,7 @@ func TestRevocation(t *testing.T) { type client struct { *disc.Client - *discovery_protos.AuthInfo + *discprotos.AuthInfo conn *grpc.ClientConn } @@ -482,7 +481,7 @@ func createClientAndService(t *testing.T, testdir string) (*client, *client, *se AuthCachePurgeRetentionRatio: 0.5, }, sup) - discovery_protos.RegisterDiscoveryServer(gRPCServer.Server(), svc) + discprotos.RegisterDiscoveryServer(gRPCServer.Server(), svc) require.NoError(t, err) go gRPCServer.Start() @@ -503,7 +502,7 @@ func createClientAndService(t *testing.T, testdir string) (*client, *client, *se require.NoError(t, err) userSigner := createUserSigner(t) - wrapperUserClient := &client{AuthInfo: &discovery_protos.AuthInfo{ + wrapperUserClient := &client{AuthInfo: &discprotos.AuthInfo{ ClientIdentity: userSigner.Creator, ClientTlsCertHash: util.ComputeSHA256(clientKeyPair.TLSCert.Raw), }, conn: conn} @@ -511,7 +510,7 @@ func createClientAndService(t *testing.T, testdir string) (*client, *client, *se wrapperUserClient.Client = disc.NewClient(wrapperUserClient.newConnection, userSigner.Sign, signerCacheSize) 
adminSigner := createAdminSigner(t) - wrapperAdminClient := &client{AuthInfo: &discovery_protos.AuthInfo{ + wrapperAdminClient := &client{AuthInfo: &discprotos.AuthInfo{ ClientIdentity: adminSigner.Creator, ClientTlsCertHash: util.ComputeSHA256(clientKeyPair.TLSCert.Raw), }, conn: conn} @@ -883,14 +882,14 @@ func aliveMsg(pkiID gcommon.PKIidType) gdisc.NetworkMember { } func buildCollectionConfig(col2principals map[string][]*msprotos.MSPPrincipal) []byte { - collections := &CollectionConfigPackage{} + collections := &peer.CollectionConfigPackage{} for col, principals := range col2principals { - collections.Config = append(collections.Config, &CollectionConfig{ - Payload: &CollectionConfig_StaticCollectionConfig{ - StaticCollectionConfig: &StaticCollectionConfig{ + collections.Config = append(collections.Config, &peer.CollectionConfig{ + Payload: &peer.CollectionConfig_StaticCollectionConfig{ + StaticCollectionConfig: &peer.StaticCollectionConfig{ Name: col, - MemberOrgsPolicy: &CollectionPolicyConfig{ - Payload: &CollectionPolicyConfig_SignaturePolicy{ + MemberOrgsPolicy: &peer.CollectionPolicyConfig{ + Payload: &peer.CollectionPolicyConfig_SignaturePolicy{ SignaturePolicy: &common.SignaturePolicyEnvelope{ Identities: principals, }, diff --git a/gossip/discovery/discovery_impl.go b/gossip/discovery/discovery_impl.go index c280a7e86b9..b3884782a05 100644 --- a/gossip/discovery/discovery_impl.go +++ b/gossip/discovery/discovery_impl.go @@ -15,7 +15,8 @@ import ( "sync" "time" - proto "github.com/hyperledger/fabric-protos-go/gossip" + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric-protos-go/gossip" "github.com/hyperledger/fabric/gossip/common" "github.com/hyperledger/fabric/gossip/gossip/msgstore" "github.com/hyperledger/fabric/gossip/protoext" @@ -414,7 +415,7 @@ func (d *gossipDiscoveryImpl) handleMsgFromComm(msg protoext.ReceivedMessage) { } } -func (d *gossipDiscoveryImpl) sendMemResponse(targetMember *proto.Member, internalEndpoint string, nonce uint64) { +func (d *gossipDiscoveryImpl) sendMemResponse(targetMember *gossip.Member, internalEndpoint string, nonce uint64) { d.logger.Debug("Entering", protoext.MemberToString(targetMember)) targetPeer := &NetworkMember{ @@ -446,10 +447,10 @@ func (d *gossipDiscoveryImpl) sendMemResponse(targetMember *proto.Member, intern defer d.logger.Debug("Exiting, replying with", protoext.MembershipResponseToString(memResp)) - msg, err := protoext.NoopSign(&proto.GossipMessage{ - Tag: proto.GossipMessage_EMPTY, + msg, err := protoext.NoopSign(&gossip.GossipMessage{ + Tag: gossip.GossipMessage_EMPTY, Nonce: nonce, - Content: &proto.GossipMessage_MemRes{ + Content: &gossip.GossipMessage_MemRes{ MemRes: memResp, }, }) @@ -461,7 +462,7 @@ func (d *gossipDiscoveryImpl) sendMemResponse(targetMember *proto.Member, intern d.comm.SendToPeer(targetPeer, msg) } -func (d *gossipDiscoveryImpl) createMembershipResponse(aliveMsg *protoext.SignedGossipMessage, targetMember *NetworkMember) *proto.MembershipResponse { +func (d *gossipDiscoveryImpl) createMembershipResponse(aliveMsg *protoext.SignedGossipMessage, targetMember *NetworkMember) *gossip.MembershipResponse { shouldBeDisclosed, omitConcealedFields := d.disclosurePolicy(targetMember) if !shouldBeDisclosed(aliveMsg) { @@ -471,7 +472,7 @@ func (d *gossipDiscoveryImpl) createMembershipResponse(aliveMsg *protoext.Signed d.lock.RLock() defer d.lock.RUnlock() - deadPeers := []*proto.Envelope{} + deadPeers := []*gossip.Envelope{} for _, dm := range d.deadMembership.ToSlice() { @@ -481,7 +482,7 @@ func 
(d *gossipDiscoveryImpl) createMembershipResponse(aliveMsg *protoext.Signed deadPeers = append(deadPeers, omitConcealedFields(dm)) } - var aliveSnapshot []*proto.Envelope + var aliveSnapshot []*gossip.Envelope for _, am := range d.aliveMembership.ToSlice() { if !shouldBeDisclosed(am) { continue @@ -489,7 +490,7 @@ func (d *gossipDiscoveryImpl) createMembershipResponse(aliveMsg *protoext.Signed aliveSnapshot = append(aliveSnapshot, omitConcealedFields(am)) } - return &proto.MembershipResponse{ + return &gossip.MembershipResponse{ Alive: append(aliveSnapshot, omitConcealedFields(aliveMsg)), Dead: deadPeers, } @@ -534,7 +535,7 @@ func (d *gossipDiscoveryImpl) handleAliveMessage(m *protoext.SignedGossipMessage if isDead { if before(lastDeadTS, ts) { // resurrect peer - d.resurrectMember(m, *ts) + d.resurrectMember(m, proto.Clone(ts).(*gossip.PeerTime)) } else if !same(lastDeadTS, ts) { d.logger.Debug("got old alive message about dead peer ", protoext.MemberToString(m.GetAliveMsg().Membership), "lastDeadTS:", lastDeadTS, "but got ts:", ts) } @@ -589,7 +590,7 @@ func (d *gossipDiscoveryImpl) isSentByMe(m *protoext.SignedGossipMessage) bool { return true } -func (d *gossipDiscoveryImpl) resurrectMember(am *protoext.SignedGossipMessage, t proto.PeerTime) { +func (d *gossipDiscoveryImpl) resurrectMember(am *protoext.SignedGossipMessage, t *gossip.PeerTime) { d.logger.Debug("Entering, AliveMessage:", am, "t:", t) defer d.logger.Debug("Exiting") d.lock.Lock() @@ -662,22 +663,22 @@ func (d *gossipDiscoveryImpl) sendMembershipRequest(member *NetworkMember, inclu d.comm.SendToPeer(member, req) } -func (d *gossipDiscoveryImpl) createMembershipRequest(includeInternalEndpoint bool) (*proto.GossipMessage, error) { +func (d *gossipDiscoveryImpl) createMembershipRequest(includeInternalEndpoint bool) (*gossip.GossipMessage, error) { am, err := d.createSignedAliveMessage(includeInternalEndpoint) if err != nil { return nil, errors.WithStack(err) } - req := &proto.MembershipRequest{ + req := &gossip.MembershipRequest{ SelfInformation: am.Envelope, // TODO: sending the known peers is not secure because the remote peer might shouldn't know // TODO: about the known peers. I'm deprecating this until a secure mechanism will be implemented. // TODO: See FAB-2570 for tracking this issue. 
Known: [][]byte{}, } - return &proto.GossipMessage{ - Tag: proto.GossipMessage_EMPTY, + return &gossip.GossipMessage{ + Tag: gossip.GossipMessage_EMPTY, Nonce: uint64(0), - Content: &proto.GossipMessage_MemReq{ + Content: &gossip.GossipMessage_MemReq{ MemReq: req, }, }, nil @@ -778,7 +779,7 @@ func (d *gossipDiscoveryImpl) periodicalSendAlive() { } } -func (d *gossipDiscoveryImpl) aliveMsgAndInternalEndpoint() (*proto.GossipMessage, string) { +func (d *gossipDiscoveryImpl) aliveMsgAndInternalEndpoint() (*gossip.GossipMessage, string) { d.lock.Lock() defer d.lock.Unlock() d.seqNum++ @@ -787,16 +788,16 @@ func (d *gossipDiscoveryImpl) aliveMsgAndInternalEndpoint() (*proto.GossipMessag meta := d.self.Metadata pkiID := d.self.PKIid internalEndpoint := d.self.InternalEndpoint - msg := &proto.GossipMessage{ - Tag: proto.GossipMessage_EMPTY, - Content: &proto.GossipMessage_AliveMsg{ - AliveMsg: &proto.AliveMessage{ - Membership: &proto.Member{ + msg := &gossip.GossipMessage{ + Tag: gossip.GossipMessage_EMPTY, + Content: &gossip.GossipMessage_AliveMsg{ + AliveMsg: &gossip.AliveMessage{ + Membership: &gossip.Member{ Endpoint: endpoint, Metadata: meta, PkiId: pkiID, }, - Timestamp: &proto.PeerTime{ + Timestamp: &gossip.PeerTime{ IncNum: d.incTime, SeqNum: seqNum, }, @@ -980,7 +981,7 @@ func (d *gossipDiscoveryImpl) UpdateEndpoint(endpoint string) { } func (d *gossipDiscoveryImpl) Self() NetworkMember { - var env *proto.Envelope + var env *gossip.Envelope msg, _ := d.aliveMsgAndInternalEndpoint() sMsg, err := protoext.NoopSign(msg) if err != nil { @@ -1031,11 +1032,11 @@ func equalPKIid(a, b common.PKIidType) bool { return bytes.Equal(a, b) } -func same(a *timestamp, b *proto.PeerTime) bool { +func same(a *timestamp, b *gossip.PeerTime) bool { return uint64(a.incTime.UnixNano()) == b.IncNum && a.seqNum == b.SeqNum } -func before(a *timestamp, b *proto.PeerTime) bool { +func before(a *timestamp, b *gossip.PeerTime) bool { return (uint64(a.incTime.UnixNano()) == b.IncNum && a.seqNum < b.SeqNum) || uint64(a.incTime.UnixNano()) < b.IncNum } diff --git a/gossip/privdata/pull.go b/gossip/privdata/pull.go index dfb5f0266bc..a4186da3684 100644 --- a/gossip/privdata/pull.go +++ b/gossip/privdata/pull.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/golang/protobuf/proto" protosgossip "github.com/hyperledger/fabric-protos-go/gossip" commonutil "github.com/hyperledger/fabric/common/util" "github.com/hyperledger/fabric/core/common/privdata" @@ -372,7 +373,7 @@ func (p *puller) scatterRequests(peersDigestMapping peer2Digests) []util.Subscri } type ( - peer2Digests map[remotePeer][]protosgossip.PvtDataDigest + peer2Digests map[remotePeer][]*protosgossip.PvtDataDigest noneSelectedPeers []discovery.NetworkMember ) @@ -380,7 +381,7 @@ func (p *puller) assignDigestsToPeers(members []discovery.NetworkMember, dig2Fil if p.logger.IsEnabledFor(zapcore.DebugLevel) { p.logger.Debug("Matching", members, "to", dig2Filter.String()) } - res := make(map[remotePeer][]protosgossip.PvtDataDigest) + res := make(map[remotePeer][]*protosgossip.PvtDataDigest) // Create a mapping between peer and digests to ask for for dig, collectionFilter := range dig2Filter { // Find a peer that is a preferred peer @@ -396,7 +397,7 @@ func (p *puller) assignDigestsToPeers(members []discovery.NetworkMember, dig2Fil } // Add the peer to the mapping from peer to digest slice peer := remotePeer{pkiID: string(selectedPeer.PKIID), endpoint: selectedPeer.Endpoint} - res[peer] = append(res[peer], protosgossip.PvtDataDigest{ + res[peer] = 
append(res[peer], &protosgossip.PvtDataDigest{ TxId: dig.TxId, BlockSeq: dig.BlockSeq, SeqInBlock: dig.SeqInBlock, @@ -688,13 +689,10 @@ func randomizeMemberList(members []discovery.NetworkMember) []discovery.NetworkM return res } -func digestsAsPointerSlice(digests []protosgossip.PvtDataDigest) []*protosgossip.PvtDataDigest { +func digestsAsPointerSlice(digests []*protosgossip.PvtDataDigest) []*protosgossip.PvtDataDigest { res := make([]*protosgossip.PvtDataDigest, len(digests)) for i, dig := range digests { - // re-introduce dig variable to allocate - // new address for each iteration - dig := dig - res[i] = &dig + res[i] = proto.Clone(dig).(*protosgossip.PvtDataDigest) } return res } diff --git a/integration/nwo/configblock.go b/integration/nwo/configblock.go index 3b0a0ba70ac..7af1cc560ed 100644 --- a/integration/nwo/configblock.go +++ b/integration/nwo/configblock.go @@ -294,7 +294,7 @@ func UnmarshalBlockFromFile(blockFile string) *common.Block { type ConsensusMetadataMutator func([]byte) []byte // MSPMutator receives FabricMSPConfig and mutates it. -type MSPMutator func(config msp.FabricMSPConfig) msp.FabricMSPConfig +type MSPMutator func(config *msp.FabricMSPConfig) *msp.FabricMSPConfig // UpdateConsensusMetadata executes a config update that updates the consensus // metadata according to the given ConsensusMetadataMutator. @@ -332,7 +332,7 @@ func UpdateOrdererMSP(network *Network, peer *Peer, orderer *Orderer, channel, o Expect(err).NotTo(HaveOccurred()) // Mutate it as we are asked - *fabricConfig = mutateMSP(*fabricConfig) + fabricConfig = mutateMSP(fabricConfig) // Wrap it back into the config mspConfig.Config = protoutil.MarshalOrPanic(fabricConfig) diff --git a/integration/raft/cft_test.go b/integration/raft/cft_test.go index c7262365d27..721066a4ef0 100644 --- a/integration/raft/cft_test.go +++ b/integration/raft/cft_test.go @@ -345,7 +345,7 @@ var _ = Describe("EndToEnd Crash Fault Tolerance", func() { Expect(err).NotTo(HaveOccurred()) By("Adding new ordering service node") - addConsenter(network, peer, orderers[0], "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, orderers[0], "testchannel", &etcdraft.Consenter{ ServerTlsCert: ordererCert, ClientTlsCert: ordererCert, Host: "127.0.0.1", diff --git a/integration/raft/config_test.go b/integration/raft/config_test.go index fe66e4459a7..695084ed5da 100644 --- a/integration/raft/config_test.go +++ b/integration/raft/config_test.go @@ -295,7 +295,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { Expect(err).NotTo(HaveOccurred()) By("Adding the second orderer") - addConsenter(network, peer, orderer, "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, orderer, "testchannel", &etcdraft.Consenter{ ServerTlsCert: secondOrdererCertificate, ClientTlsCert: secondOrdererCertificate, Host: "127.0.0.1", @@ -403,9 +403,10 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { By("Expanding the TLS root CA certificates and adding orderer3 to the channel") updateOrdererMSPAndConsensusMetadata(network, peer, orderer, "testchannel", "OrdererOrg", - func(config msp.FabricMSPConfig) msp.FabricMSPConfig { // MSP mutator - config.TlsRootCerts = append(config.TlsRootCerts, caCert) - return config + func(config *msp.FabricMSPConfig) *msp.FabricMSPConfig { // MSP mutator + tmp := proto.Clone(config).(*msp.FabricMSPConfig) + tmp.TlsRootCerts = append(tmp.TlsRootCerts, caCert) + return tmp }, func(metadata *etcdraft.ConfigMetadata) { // etcdraft mutator metadata.Consenters = 
append(metadata.Consenters, &etcdraft.Consenter{ @@ -448,7 +449,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { peer, orderer, "testchannel", - etcdraft.Consenter{ + &etcdraft.Consenter{ ServerTlsCert: client.Cert, ClientTlsCert: client.Cert, Host: newConsenterHost, @@ -527,7 +528,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { extendNetwork(network) certificateRotations := refreshOrdererPEMs(network) - swap := func(o *nwo.Orderer, certificate []byte, c etcdraft.Consenter) { + swap := func(o *nwo.Orderer, certificate []byte, c *etcdraft.Consenter) { updateEtcdRaftMetadata(network, peer, o, "testchannel", func(metadata *etcdraft.ConfigMetadata) { var newConsenters []*etcdraft.Consenter for _, consenter := range metadata.Consenters { @@ -536,7 +537,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { } newConsenters = append(newConsenters, consenter) } - newConsenters = append(newConsenters, &c) + newConsenters = append(newConsenters, c) metadata.Consenters = newConsenters }) @@ -566,7 +567,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { port := network.OrdererPort(targetOrderer, nwo.ClusterPort) fmt.Fprintf(GinkgoWriter, "Rotating certificate of orderer node %d\n", target+1) - swap(submitterOrderer, rotation.oldCert, etcdraft.Consenter{ + swap(submitterOrderer, rotation.oldCert, &etcdraft.Consenter{ ServerTlsCert: rotation.newCert, ClientTlsCert: rotation.newCert, Host: "127.0.0.1", @@ -708,7 +709,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { By(fmt.Sprintf("Adding the future certificate of orderer node %d", i)) for _, channelName := range []string{"testchannel"} { - addConsenter(network, peer, o, channelName, etcdraft.Consenter{ + addConsenter(network, peer, o, channelName, &etcdraft.Consenter{ ServerTlsCert: rotation.newCert, ClientTlsCert: rotation.newCert, Host: "127.0.0.1", @@ -799,7 +800,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { orderer4Certificate, err := os.ReadFile(orderer4CertificatePath) Expect(err).NotTo(HaveOccurred()) - addConsenter(network, peer, o1, "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, o1, "testchannel", &etcdraft.Consenter{ ServerTlsCert: orderer4Certificate, ClientTlsCert: orderer4Certificate, Host: "127.0.0.1", @@ -864,7 +865,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { Eventually(orderer4Runner.Err(), network.EventuallyTimeout).Should(gbytes.Say("channel does not exist")) By("Adding orderer4 to testchannel2") - addConsenter(network, peer, o1, "testchannel2", etcdraft.Consenter{ + addConsenter(network, peer, o1, "testchannel2", &etcdraft.Consenter{ ServerTlsCert: orderer4Certificate, ClientTlsCert: orderer4Certificate, Host: "127.0.0.1", @@ -902,7 +903,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { }, orderers, peer, network) By("Adding orderer4 to testchannel3") - addConsenter(network, peer, o1, "testchannel3", etcdraft.Consenter{ + addConsenter(network, peer, o1, "testchannel3", &etcdraft.Consenter{ ServerTlsCert: orderer4Certificate, ClientTlsCert: orderer4Certificate, Host: "127.0.0.1", @@ -994,7 +995,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { ordererCertificate, err := os.ReadFile(ordererCertificatePath) Expect(err).NotTo(HaveOccurred()) - addConsenter(network, peer, o1, "mychannel", etcdraft.Consenter{ + addConsenter(network, peer, o1, "mychannel", &etcdraft.Consenter{ ServerTlsCert: 
ordererCertificate, ClientTlsCert: ordererCertificate, Host: "127.0.0.1", @@ -1021,7 +1022,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { ordererCertificatePath = filepath.Join(network.OrdererLocalTLSDir(o3), "server.crt") ordererCertificate, err = os.ReadFile(ordererCertificatePath) Expect(err).NotTo(HaveOccurred()) - addConsenter(network, peer, o1, "mychannel", etcdraft.Consenter{ + addConsenter(network, peer, o1, "mychannel", &etcdraft.Consenter{ ServerTlsCert: ordererCertificate, ClientTlsCert: ordererCertificate, Host: "127.0.0.1", @@ -1100,7 +1101,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { peer, o2, "testchannel", - etcdraft.Consenter{ + &etcdraft.Consenter{ ServerTlsCert: certificatesOfOrderers[0].newCert, ClientTlsCert: certificatesOfOrderers[0].newCert, Host: "127.0.0.1", @@ -1231,7 +1232,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { Eventually(assertFollower(expectedInfo, orderers[secondEvictedNode]), network.EventuallyTimeout, 100*time.Millisecond).Should(BeTrue()) By("Re-adding first evicted orderer") - addConsenter(network, peer, network.Orderers[survivor], "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, network.Orderers[survivor], "testchannel", &etcdraft.Consenter{ Host: "127.0.0.1", Port: uint32(network.OrdererPort(orderers[firstEvictedNode], nwo.ClusterPort)), ClientTlsCert: server1CertBytes, @@ -1288,7 +1289,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { removeConsenter(network, peer, o2, "testchannel", server1CertBytes) By("Adding the evicted orderer back to the application channel") - addConsenter(network, peer, o2, "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, o2, "testchannel", &etcdraft.Consenter{ ServerTlsCert: server1CertBytes, ClientTlsCert: server1CertBytes, Host: "127.0.0.1", @@ -1299,7 +1300,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { removeConsenter(network, peer, o2, "testchannel", server1CertBytes) By("Adding the evicted orderer back to the application channel again") - addConsenter(network, peer, o2, "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, o2, "testchannel", &etcdraft.Consenter{ ServerTlsCert: server1CertBytes, ClientTlsCert: server1CertBytes, Host: "127.0.0.1", @@ -1459,7 +1460,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { }, network.EventuallyTimeout).Should(Equal(expectedChannelInfo)) By("Adding the evicted orderer back to the application channel") - addConsenter(network, peer, o2, "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, o2, "testchannel", &etcdraft.Consenter{ ServerTlsCert: o1cert, ClientTlsCert: o1cert, Host: "127.0.0.1", @@ -1553,7 +1554,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { ordererCertificate, err := os.ReadFile(ordererCertificatePath) Expect(err).NotTo(HaveOccurred()) - addConsenter(network, peer, orderers[0], "testchannel", etcdraft.Consenter{ + addConsenter(network, peer, orderers[0], "testchannel", &etcdraft.Consenter{ ServerTlsCert: ordererCertificate, ClientTlsCert: ordererCertificate, Host: "127.0.0.1", @@ -1951,7 +1952,8 @@ func revokeReaderAccess(network *nwo.Network, channel string, orderer *nwo.Order // consenterAdder constructs configs that can be used by `UpdateOrdererConfig` // to add a consenter. 
-func consenterAdder(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, channel string, consenter etcdraft.Consenter) (current, updated *common.Config) {
+func consenterAdder(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, channel string, consenter *etcdraft.Consenter) (current, updated *common.Config) {
+	tmp := proto.Clone(consenter).(*etcdraft.Consenter)
 	config := nwo.GetConfig(n, peer, orderer, channel)
 	updatedConfig := proto.Clone(config).(*common.Config)
@@ -1964,7 +1966,7 @@ func consenterAdder(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, channe
 	err = proto.Unmarshal(consensusTypeValue.Metadata, metadata)
 	Expect(err).NotTo(HaveOccurred())
-	metadata.Consenters = append(metadata.Consenters, &consenter)
+	metadata.Consenters = append(metadata.Consenters, tmp)
 	consensusTypeValue.Metadata, err = proto.Marshal(metadata)
 	Expect(err).NotTo(HaveOccurred())
@@ -2013,9 +2015,10 @@ func consenterRemover(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, chan
 }
 // addConsenter adds a new consenter to the given channel.
-func addConsenter(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, channel string, consenter etcdraft.Consenter) {
+func addConsenter(n *nwo.Network, peer *nwo.Peer, orderer *nwo.Orderer, channel string, consenter *etcdraft.Consenter) {
+	tmp := proto.Clone(consenter).(*etcdraft.Consenter)
 	updateEtcdRaftMetadata(n, peer, orderer, channel, func(metadata *etcdraft.ConfigMetadata) {
-		metadata.Consenters = append(metadata.Consenters, &consenter)
+		metadata.Consenters = append(metadata.Consenters, tmp)
 	})
 }
@@ -2078,7 +2081,7 @@ func updateOrdererMSPAndConsensusMetadata(network *nwo.Network, peer *nwo.Peer,
 	Expect(err).NotTo(HaveOccurred())
 	// Mutate it as we are asked
-	*fabricConfig = mutateMSP(*fabricConfig)
+	fabricConfig = mutateMSP(fabricConfig)
 	// Wrap it back into the config
 	mspConfig.Config = protoutil.MarshalOrPanic(fabricConfig)
diff --git a/internal/pkg/gateway/endorse.go b/internal/pkg/gateway/endorse.go
index 202aec3878b..d305c876ba5 100644
--- a/internal/pkg/gateway/endorse.go
+++ b/internal/pkg/gateway/endorse.go
@@ -241,8 +241,8 @@ func (gs *Server) planFromFirstEndorser(ctx context.Context, channel string, cha
 	var protectedCollections []string
 	if hasTransientData {
 		for _, call := range interest.GetChaincodes() {
-			ccc := *call // shallow copy
-			originalInterest.Chaincodes = append(originalInterest.Chaincodes, &ccc)
+			ccc := proto.Clone(call).(*peer.ChaincodeCall)
+			originalInterest.Chaincodes = append(originalInterest.Chaincodes, ccc)
 			if call.NoPrivateReads {
 				call.NoPrivateReads = false
 				protectedCollections = append(protectedCollections, call.CollectionNames...)
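The gateway hunk above shows the pattern applied throughout this patch: protobuf-generated messages are no longer copied by value (`ccc := *call`) but duplicated with `proto.Clone`. A value copy shares slice and nested-message fields with the original, and with protobuf runtimes that embed an internal no-copy guard in generated structs it is also flagged by `go vet`'s copylocks check; `proto.Clone` returns an independent deep copy. The standalone sketch below is editorial illustration, not part of the diff: the `main` wrapper and the literal field values are invented, while `peer.ChaincodeCall` and `proto.Clone` are the same identifiers used in the change.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/peer"
)

func main() {
	call := &peer.ChaincodeCall{
		Name:            "cc2",
		CollectionNames: []string{"col1"},
		NoPrivateReads:  true,
	}

	// Value copy: the top-level struct is duplicated, but the slice field
	// still points at the same backing array as the original message.
	shallow := *call
	shallow.CollectionNames[0] = "col9"
	fmt.Println(call.CollectionNames[0]) // "col9": the original was mutated too

	// proto.Clone: an independent deep copy; changes do not leak back.
	clone := proto.Clone(call).(*peer.ChaincodeCall)
	clone.NoPrivateReads = false
	clone.CollectionNames[0] = "col1"
	fmt.Println(call.NoPrivateReads, call.CollectionNames[0]) // true col9
}
```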
diff --git a/orderer/common/cluster/clusterservice_test.go b/orderer/common/cluster/clusterservice_test.go index deb3e54aac5..b5fb3fecbd6 100644 --- a/orderer/common/cluster/clusterservice_test.go +++ b/orderer/common/cluster/clusterservice_test.go @@ -17,6 +17,7 @@ import ( "testing" "time" + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-lib-go/common/flogging" "github.com/hyperledger/fabric-lib-go/common/metrics/disabled" "github.com/hyperledger/fabric-protos-go/common" @@ -39,8 +40,7 @@ import ( var ( sourceNodeID uint64 = 1 destinationNodeID uint64 = 2 - streamID uint64 = 111 - nodeAuthRequest = orderer.NodeAuthRequest{ + nodeAuthRequest = &orderer.NodeAuthRequest{ Version: 0, FromId: sourceNodeID, ToId: destinationNodeID, @@ -54,28 +54,11 @@ var ( }, }, } - nodeTranRequest = &orderer.ClusterNodeServiceStepRequest{ - Payload: &orderer.ClusterNodeServiceStepRequest_NodeTranrequest{ - NodeTranrequest: &orderer.NodeTransactionOrderRequest{ - LastValidationSeq: 0, - Payload: &common.Envelope{}, - }, - }, - } nodeInvalidRequest = &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeConrequest{ NodeConrequest: nil, }, } - submitRequest = &orderer.StepRequest{ - Payload: &orderer.StepRequest_SubmitRequest{ - SubmitRequest: &orderer.SubmitRequest{ - LastValidationSeq: 0, - Payload: &common.Envelope{}, - Channel: "mychannel", - }, - }, - } ) // Cluster Step stream for TLS Export Keying Material retrival @@ -141,9 +124,9 @@ func TestClusterServiceStep(t *testing.T) { NodeIdentity: serverKeyPair.Cert, } - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) sessionBinding, err := cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) require.NoError(t, err) @@ -166,7 +149,7 @@ func TestClusterServiceStep(t *testing.T) { stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -264,9 +247,9 @@ func TestClusterServiceStep(t *testing.T) { NodeIdentity: serverKeyPair.Cert, } - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) sessionBinding, err := cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) require.NoError(t, err) @@ -292,7 +275,7 @@ func TestClusterServiceStep(t *testing.T) { stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -314,7 +297,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request completes successfully", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) var err error handler := &mocks.Handler{} @@ -335,7 +318,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stream := &mocks.ClusterStepStream{} stream.On("Context").Return(stepStream.Context()) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), 
bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ @@ -356,7 +339,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } svc.ConfigureNodeCerts(authRequest.Channel, []*common.Consenter{{Id: uint32(authRequest.FromId), Identity: clientKeyPair1.Cert}, {Id: uint32(authRequest.ToId), Identity: svc.NodeIdentity}}) @@ -366,7 +349,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with sessing binding error", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) handler := &mocks.Handler{} svc := &cluster.ClusterService{ @@ -384,7 +367,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { svc.ConfigureNodeCerts(authRequest.Channel, []*common.Consenter{{Id: uint32(authRequest.FromId), Identity: clientKeyPair1.Cert}}) stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } _, err := svc.VerifyAuthRequest(stream, stepRequest) @@ -394,10 +377,10 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with session binding mismatch", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -438,7 +421,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with channel config not found", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) handler := &mocks.Handler{} svc := &cluster.ClusterService{ @@ -453,7 +436,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stream := &mocks.ClusterStepStream{} stream.On("Context").Return(stepStream.Context()) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ Version: int64(authRequest.Version), @@ -472,7 +455,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { authRequest.Signature = sig stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -484,10 +467,10 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with node not part of the channel", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -504,7 +487,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stream := &mocks.ClusterStepStream{} 
stream.On("Context").Return(stepStream.Context()) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ Version: int64(authRequest.Version), @@ -531,7 +514,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with signature mismatch", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) handler := &mocks.Handler{} serverKeyPair, _ := ca.NewServerCertKeyPair() @@ -548,7 +531,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stream := &mocks.ClusterStepStream{} stream.On("Context").Return(stepStream.Context()) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ @@ -568,7 +551,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { authRequest.Signature = sig stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -580,7 +563,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { t.Run("Verify auth request fails with signature mismatch", func(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) handler := &mocks.Handler{} var err error @@ -601,7 +584,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { stream := &mocks.ClusterStepStream{} stream.On("Context").Return(stepStream.Context()) - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ @@ -624,7 +607,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { authRequest.Signature = sig stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -637,7 +620,7 @@ func TestClusterServiceVerifyAuthRequest(t *testing.T) { func TestConfigureNodeCerts(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) t.Run("Creates new entry when input channel not part of the members list", func(t *testing.T) { t.Parallel() @@ -681,7 +664,7 @@ func TestConfigureNodeCerts(t *testing.T) { func TestExpirationWarning(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) server, stepStream := getStepStream(t) defer server.Stop() @@ -709,7 +692,7 @@ func TestExpirationWarning(t *testing.T) { NodeIdentity: serverKeyPair.Cert, } - bindingHash := cluster.GetSessionBindingHash(&authRequest) + bindingHash := cluster.GetSessionBindingHash(authRequest) authRequest.SessionBinding, _ = cluster.GetTLSSessionBinding(stepStream.Context(), bindingHash) asnSignFields, _ := asn1.Marshal(cluster.AuthRequestSignature{ @@ -732,7 +715,7 @@ func 
TestExpirationWarning(t *testing.T) { authRequest.Signature = sig stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } @@ -765,10 +748,10 @@ func TestExpirationWarning(t *testing.T) { func TestClusterRequestAsString(t *testing.T) { t.Parallel() - authRequest := nodeAuthRequest + authRequest := proto.Clone(nodeAuthRequest).(*orderer.NodeAuthRequest) stepRequest := &orderer.ClusterNodeServiceStepRequest{ Payload: &orderer.ClusterNodeServiceStepRequest_NodeAuthrequest{ - NodeAuthrequest: &authRequest, + NodeAuthrequest: authRequest, }, } tcs := []struct { diff --git a/orderer/consensus/etcdraft/validator_test.go b/orderer/consensus/etcdraft/validator_test.go index 935cb6e6ed1..f522a446f34 100644 --- a/orderer/consensus/etcdraft/validator_test.go +++ b/orderer/consensus/etcdraft/validator_test.go @@ -11,9 +11,6 @@ import ( "time" "github.com/golang/protobuf/proto" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "github.com/hyperledger/fabric-lib-go/bccsp" "github.com/hyperledger/fabric-lib-go/bccsp/sw" raftprotos "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" @@ -22,6 +19,8 @@ import ( "github.com/hyperledger/fabric/orderer/consensus/etcdraft" "github.com/hyperledger/fabric/orderer/consensus/etcdraft/mocks" consensusmocks "github.com/hyperledger/fabric/orderer/consensus/mocks" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) func makeOrdererOrg(caCert []byte) *mocks.OrdererOrg { @@ -130,14 +129,14 @@ var _ = Describe("Metadata Validation", func() { Context("valid old consensus metadata", func() { var ( - metadata raftprotos.ConfigMetadata + metadata *raftprotos.ConfigMetadata oldOrdererConfig *mocks.OrdererConfig newOrdererConfig *mocks.OrdererConfig newChannel bool ) BeforeEach(func() { - metadata = raftprotos.ConfigMetadata{ + metadata = &raftprotos.ConfigMetadata{ Options: &raftprotos.Options{ TickInterval: "500ms", ElectionTick: 10, @@ -167,7 +166,7 @@ var _ = Describe("Metadata Validation", func() { }, } - oldBytes, err := proto.Marshal(&metadata) + oldBytes, err := proto.Marshal(metadata) Expect(err).NotTo(HaveOccurred()) oldOrdererConfig = mockOrderer(oldBytes) org1 := makeOrdererOrg(tlsCA.CertBytes()) @@ -186,9 +185,9 @@ var _ = Describe("Metadata Validation", func() { It("fails when new consensus metadata has invalid options", func() { // NOTE: we are not checking all failures here since tests for CheckConfigMetadata does that - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Options.TickInterval = "" - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) @@ -200,35 +199,35 @@ var _ = Describe("Metadata Validation", func() { }) It("fails when the new consenters are an empty set", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = []*raftprotos.Consenter{} - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) }) It("succeeds when 
the new consenters are the same as the existing consenters", func() { - newMetadata := metadata - newBytes, err := proto.Marshal(&newMetadata) + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) }) It("succeeds when the new consenters are a subset of the existing consenters", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = newMetadata.Consenters[:2] - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) }) It("fails when the new consenters are not a subset of the existing consenters", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters[2].ClientTlsCert = clientTLSCert(tlsCA) - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) @@ -237,9 +236,9 @@ var _ = Describe("Metadata Validation", func() { It("fails when the new consenter has certificate which not signed by any CA of an orderer org", func() { anotherCa, err := tlsgen.NewCA() Expect(err).NotTo(HaveOccurred()) - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters[2].ClientTlsCert = clientTLSCert(anotherCa) - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) @@ -257,39 +256,39 @@ var _ = Describe("Metadata Validation", func() { }) It("fails when the new consenters are an empty set", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) // NOTE: This also takes care of the case when we remove node from a singleton consenter set newMetadata.Consenters = []*raftprotos.Consenter{} - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) }) It("succeeds when the new consenters are the same as the existing consenters", func() { - newMetadata := metadata - newBytes, err := proto.Marshal(&newMetadata) + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) }) It("succeeds on addition of a single consenter", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = append(newMetadata.Consenters, &raftprotos.Consenter{ Host: "host4", 
Port: 10004, ClientTlsCert: clientTLSCert(tlsCA), ServerTlsCert: serverTLSCert(tlsCA), }) - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) }) It("fails on addition of more than one consenter", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = append(newMetadata.Consenters, &raftprotos.Consenter{ Host: "host4", @@ -304,39 +303,39 @@ var _ = Describe("Metadata Validation", func() { ServerTlsCert: serverTLSCert(tlsCA), }, ) - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) }) It("succeeds on removal of a single consenter", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = newMetadata.Consenters[:2] - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) }) It("fails on removal of more than one consenter", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = newMetadata.Consenters[:1] - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).NotTo(Succeed()) }) It("succeeds on rotating certs in case of both addition and removal of a node each to reuse the raft NodeId", func() { - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = append(newMetadata.Consenters[:2], &raftprotos.Consenter{ Host: "host4", Port: 10004, ClientTlsCert: clientTLSCert(tlsCA), ServerTlsCert: serverTLSCert(tlsCA), }) - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) @@ -344,9 +343,9 @@ var _ = Describe("Metadata Validation", func() { It("succeeds on removal of inactive node in 2/3 cluster", func() { chain.ActiveNodes.Store([]uint64{1, 2}) - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) newMetadata.Consenters = newMetadata.Consenters[:2] - newBytes, err := proto.Marshal(&newMetadata) + newBytes, err := proto.Marshal(newMetadata) Expect(err).NotTo(HaveOccurred()) newOrdererConfig.ConsensusMetadataReturns(newBytes) Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed()) @@ -354,9 +353,9 @@ var _ = Describe("Metadata Validation", func() { It("fails on removal of active node in 2/3 cluster", func() { chain.ActiveNodes.Store([]uint64{1, 2}) - newMetadata := metadata + newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata) 
 			newMetadata.Consenters = newMetadata.Consenters[1:]
-			newBytes, err := proto.Marshal(&newMetadata)
+			newBytes, err := proto.Marshal(newMetadata)
 			Expect(err).NotTo(HaveOccurred())
 			newOrdererConfig.ConsensusMetadataReturns(newBytes)
 			Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(
@@ -392,9 +391,9 @@ var _ = Describe("Metadata Validation", func() {
 		It("succeeds on removal of inactive node in 2/3 cluster", func() {
 			chain.ActiveNodes.Store([]uint64{2, 3}) // 4 is inactive
-			newMetadata := metadata
+			newMetadata := proto.Clone(metadata).(*raftprotos.ConfigMetadata)
 			newMetadata.Consenters = newMetadata.Consenters[:2]
-			newBytes, err := proto.Marshal(&newMetadata)
+			newBytes, err := proto.Marshal(newMetadata)
 			Expect(err).NotTo(HaveOccurred())
 			newOrdererConfig.ConsensusMetadataReturns(newBytes)
 			Expect(chain.ValidateConsensusMetadata(oldOrdererConfig, newOrdererConfig, newChannel)).To(Succeed())
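The validator and cluster-service test changes end on the same clone-per-test idiom: a package-level template message stays immutable and each test works on a `proto.Clone` of it. The previous value copy (`newMetadata := metadata`) still shared the `Consenters` backing array and the `*Consenter` element pointers with the template, so mutating an element (for example the cert-rotation cases) also mutated the shared fixture. A self-contained sketch of that idiom follows; the `baseMetadata` fixture, test name, and literal values are invented for illustration, while the types, `proto.Clone`/`proto.Equal`, and `require` are those already used in this diff.

```go
package fixtures_test

import (
	"testing"

	"github.com/golang/protobuf/proto"
	raftprotos "github.com/hyperledger/fabric-protos-go/orderer/etcdraft"
	"github.com/stretchr/testify/require"
)

// Shared template: tests must never mutate it directly.
var baseMetadata = &raftprotos.ConfigMetadata{
	Options: &raftprotos.Options{TickInterval: "500ms", ElectionTick: 10, HeartbeatTick: 1},
	Consenters: []*raftprotos.Consenter{
		{Host: "host1", Port: 10001},
		{Host: "host2", Port: 10002},
		{Host: "host3", Port: 10003},
	},
}

func TestConsenterRotationDoesNotLeak(t *testing.T) {
	// Clone per test; a plain value copy would still share the *Consenter
	// pointers, so the cert rotation below would corrupt baseMetadata.
	newMetadata := proto.Clone(baseMetadata).(*raftprotos.ConfigMetadata)
	newMetadata.Consenters[2].ClientTlsCert = []byte("rotated-cert")
	newMetadata.Consenters = newMetadata.Consenters[:2]

	require.Len(t, baseMetadata.Consenters, 3)
	require.Nil(t, baseMetadata.Consenters[2].ClientTlsCert) // template untouched
	require.False(t, proto.Equal(newMetadata, baseMetadata))
}
```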