Added migration methods for plan store
bsrinivas8687 committed Mar 12, 2022
1 parent 25d568e commit 81ac8d2
Showing 8 changed files with 187 additions and 82 deletions.
4 changes: 2 additions & 2 deletions x/plan/keeper/keeper.go
@@ -13,13 +13,13 @@ import (
 )
 
 type Keeper struct {
-    cdc      codec.BinaryMarshaler
+    cdc      codec.BinaryCodec
     key      sdk.StoreKey
     provider expected.ProviderKeeper
     node     expected.NodeKeeper
 }
 
-func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey) Keeper {
+func NewKeeper(cdc codec.BinaryCodec, key sdk.StoreKey) Keeper {
     return Keeper{
         cdc: cdc,
         key: key,
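The interface rename above is the cosmos-sdk v0.43 codec overhaul: `codec.BinaryMarshaler` became `codec.BinaryCodec`, and the `...BinaryBare` method names were shortened (`MustMarshalBinaryBare` → `MustMarshal`, and so on). The same mechanical substitution runs through every file below. A minimal, self-contained sketch of the renamed API, assuming cosmos-sdk v0.43+:

```go
package main

import (
    "fmt"

    "github.com/cosmos/cosmos-sdk/codec"
    codectypes "github.com/cosmos/cosmos-sdk/codec/types"
    protobuf "github.com/gogo/protobuf/types"
)

func main() {
    // ProtoCodec satisfies the renamed BinaryCodec interface.
    var cdc codec.BinaryCodec = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())

    // v0.43: MustMarshal replaces MustMarshalBinaryBare.
    bz := cdc.MustMarshal(&protobuf.UInt64Value{Value: 42})

    // v0.43: MustUnmarshal replaces MustUnmarshalBinaryBare.
    var out protobuf.UInt64Value
    cdc.MustUnmarshal(bz, &out)

    fmt.Println(out.GetValue()) // 42
}
```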
119 changes: 119 additions & 0 deletions x/plan/keeper/migrations.go
@@ -0,0 +1,119 @@
package keeper

import (
    "github.com/cosmos/cosmos-sdk/store/prefix"
    sdk "github.com/cosmos/cosmos-sdk/types"

    "github.com/sentinel-official/hub/x/plan/types"
)

type Migrator struct {
    k Keeper
}

func NewMigrator(k Keeper) Migrator {
    return Migrator{k: k}
}

// Migrate1to2 rewrites every key in the plan store from the v1 layout to
// the v2 layout, one prefixed substore at a time.
func (m Migrator) Migrate1to2(ctx sdk.Context) error {
    store := m.k.Store(ctx)

    if err := migrateActivePlanForProviderKeys(store); err != nil {
        return err
    }
    if err := migrateInactivePlanForProviderKeys(store); err != nil {
        return err
    }
    if err := migrateNodeForPlanKeys(store); err != nil {
        return err
    }
    if err := migrateCountForNodeByProviderKeys(store); err != nil {
        return err
    }

    return nil
}

func migrateActivePlanForProviderKeys(parent sdk.KVStore) error {
    child := prefix.NewStore(parent, types.ActivePlanForProviderKeyPrefix)

    iterator := child.Iterator(nil, nil)
    defer iterator.Close()

    for ; iterator.Valid(); iterator.Next() {
        // v1 key layout: 20-byte provider address | 8-byte big-endian plan ID.
        var (
            addr = iterator.Key()[:20]
            id   = sdk.BigEndianToUint64(iterator.Key()[20:])
        )

        // Re-derive the full key with the current key function, write the
        // value at the new location, and drop the stale entry.
        key := types.ActivePlanForProviderKey(addr, id)

        parent.Set(key, iterator.Value())
        child.Delete(iterator.Key())
    }

    return nil
}

func migrateInactivePlanForProviderKeys(parent sdk.KVStore) error {
    child := prefix.NewStore(parent, types.InactivePlanForProviderKeyPrefix)

    iterator := child.Iterator(nil, nil)
    defer iterator.Close()

    for ; iterator.Valid(); iterator.Next() {
        // v1 key layout: 20-byte provider address | 8-byte big-endian plan ID.
        var (
            addr = iterator.Key()[:20]
            id   = sdk.BigEndianToUint64(iterator.Key()[20:])
        )

        key := types.InactivePlanForProviderKey(addr, id)

        parent.Set(key, iterator.Value())
        child.Delete(iterator.Key())
    }

    return nil
}

func migrateNodeForPlanKeys(parent sdk.KVStore) error {
    child := prefix.NewStore(parent, types.NodeForPlanKeyPrefix)

    iterator := child.Iterator(nil, nil)
    defer iterator.Close()

    for ; iterator.Valid(); iterator.Next() {
        // v1 key layout: 8-byte big-endian plan ID | node address.
        var (
            id   = sdk.BigEndianToUint64(iterator.Key()[:8])
            addr = iterator.Key()[8:]
        )

        key := types.NodeForPlanKey(id, addr)

        parent.Set(key, iterator.Value())
        child.Delete(iterator.Key())
    }

    return nil
}

func migrateCountForNodeByProviderKeys(parent sdk.KVStore) error {
    child := prefix.NewStore(parent, types.CountForNodeByProviderKeyPrefix)

    iterator := child.Iterator(nil, nil)
    defer iterator.Close()

    for ; iterator.Valid(); iterator.Next() {
        // v1 key layout: 20-byte provider address | node address.
        var (
            provider = iterator.Key()[:20]
            node     = iterator.Key()[20:]
        )

        key := types.CountForNodeByProviderKey(provider, node)

        parent.Set(key, iterator.Value())
        child.Delete(iterator.Key())
    }

    return nil
}
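Each migration helper above follows the same pattern: open a prefixed child store, walk every legacy entry, split the raw key into its address and ID components, re-derive the key with the current key function (which presumably length-prefixes the address, in line with cosmos-sdk v0.43 store conventions), write the value on the parent store, and delete the stale entry. For `Migrate1to2` to actually run during an upgrade it still has to be registered against the module's consensus version. A minimal sketch of that wiring, assuming the standard `module.Configurator` API and an `AppModule` holding the keeper (neither appears in this diff):

```go
// x/plan/module.go (sketch; hypothetical wiring, not part of this commit)
package plan

import (
    "github.com/cosmos/cosmos-sdk/types/module"

    "github.com/sentinel-official/hub/x/plan/keeper"
    "github.com/sentinel-official/hub/x/plan/types"
)

// AppModule is stubbed here only to show the hook; the real struct
// lives elsewhere in the module.
type AppModule struct {
    k keeper.Keeper
}

// RegisterServices registers the v1 -> v2 store migration so that the
// upgrade machinery invokes Migrate1to2 when the plan module moves
// from consensus version 1 to 2.
func (a AppModule) RegisterServices(cfg module.Configurator) {
    m := keeper.NewMigrator(a.k)
    if err := cfg.RegisterMigration(types.ModuleName, 1, m.Migrate1to2); err != nil {
        panic(err)
    }
}

// ConsensusVersion reports 2 so the registered migration fires.
func (a AppModule) ConsensusVersion() uint64 { return 2 }
```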
6 changes: 3 additions & 3 deletions x/plan/keeper/node.go
@@ -11,7 +11,7 @@ import (
 
 func (k *Keeper) SetNodeForPlan(ctx sdk.Context, id uint64, address hubtypes.NodeAddress) {
     key := types.NodeForPlanKey(id, address)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.BoolValue{Value: true})
+    value := k.cdc.MustMarshal(&protobuf.BoolValue{Value: true})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -52,7 +52,7 @@ func (k *Keeper) GetNodesForPlan(ctx sdk.Context, id uint64, skip, limit int64)
 
 func (k *Keeper) SetCountForNodeByProvider(ctx sdk.Context, p hubtypes.ProvAddress, n hubtypes.NodeAddress, count uint64) {
     key := types.CountForNodeByProviderKey(p, n)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.UInt64Value{Value: count})
+    value := k.cdc.MustMarshal(&protobuf.UInt64Value{Value: count})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -68,7 +68,7 @@ func (k *Keeper) GetCountForNodeByProvider(ctx sdk.Context, p hubtypes.ProvAddress
     }
 
     var count protobuf.UInt64Value
-    k.cdc.MustUnmarshalBinaryBare(value, &count)
+    k.cdc.MustUnmarshal(value, &count)
 
     return count.GetValue()
 }
18 changes: 9 additions & 9 deletions x/plan/keeper/plan.go
@@ -10,7 +10,7 @@ import (
 
 func (k *Keeper) SetCount(ctx sdk.Context, count uint64) {
     key := types.CountKey
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.UInt64Value{Value: count})
+    value := k.cdc.MustMarshal(&protobuf.UInt64Value{Value: count})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -26,14 +26,14 @@ func (k *Keeper) GetCount(ctx sdk.Context) uint64 {
     }
 
     var count protobuf.UInt64Value
-    k.cdc.MustUnmarshalBinaryBare(value, &count)
+    k.cdc.MustUnmarshal(value, &count)
 
     return count.GetValue()
 }
 
 func (k *Keeper) SetPlan(ctx sdk.Context, plan types.Plan) {
     key := types.PlanKey(plan.Id)
-    value := k.cdc.MustMarshalBinaryBare(&plan)
+    value := k.cdc.MustMarshal(&plan)
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -48,7 +48,7 @@ func (k *Keeper) GetPlan(ctx sdk.Context, id uint64) (plan types.Plan, found bool
         return plan, false
     }
 
-    k.cdc.MustUnmarshalBinaryBare(value, &plan)
+    k.cdc.MustUnmarshal(value, &plan)
     return plan, true
 }
 
@@ -65,7 +65,7 @@ func (k *Keeper) GetPlans(ctx sdk.Context, skip, limit int64) (items types.Plans
     iter.Skip(skip)
     iter.Limit(limit, func(iter sdk.Iterator) {
         var item types.Plan
-        k.cdc.MustUnmarshalBinaryBare(iter.Value(), &item)
+        k.cdc.MustUnmarshal(iter.Value(), &item)
         items = append(items, item)
     })
 
@@ -74,7 +74,7 @@
 
 func (k *Keeper) SetActivePlan(ctx sdk.Context, id uint64) {
     key := types.ActivePlanKey(id)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.BoolValue{Value: true})
+    value := k.cdc.MustMarshal(&protobuf.BoolValue{Value: true})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -108,7 +108,7 @@ func (k *Keeper) GetActivePlans(ctx sdk.Context, skip, limit int64) (items types.Plans
 
 func (k *Keeper) SetInactivePlan(ctx sdk.Context, id uint64) {
     key := types.InactivePlanKey(id)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.BoolValue{Value: true})
+    value := k.cdc.MustMarshal(&protobuf.BoolValue{Value: true})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -142,7 +142,7 @@ func (k *Keeper) GetInactivePlans(ctx sdk.Context, skip, limit int64) (items types.Plans
 
 func (k *Keeper) SetActivePlanForProvider(ctx sdk.Context, address hubtypes.ProvAddress, id uint64) {
     key := types.ActivePlanForProviderKey(address, id)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.BoolValue{Value: true})
+    value := k.cdc.MustMarshal(&protobuf.BoolValue{Value: true})
 
     store := k.Store(ctx)
     store.Set(key, value)
@@ -176,7 +176,7 @@ func (k *Keeper) GetActivePlansForProvider(ctx sdk.Context, address hubtypes.ProvAddress
 
 func (k *Keeper) SetInactivePlanForProvider(ctx sdk.Context, address hubtypes.ProvAddress, id uint64) {
     key := types.InactivePlanForProviderKey(address, id)
-    value := k.cdc.MustMarshalBinaryBare(&protobuf.BoolValue{Value: true})
+    value := k.cdc.MustMarshal(&protobuf.BoolValue{Value: true})
 
     store := k.Store(ctx)
     store.Set(key, value)
4 changes: 2 additions & 2 deletions x/plan/keeper/query_server.go
@@ -85,7 +85,7 @@ func (q *queryServer) QueryPlans(c context.Context, req *types.QueryPlansRequest
         store := prefix.NewStore(q.Store(ctx), types.PlanKeyPrefix)
         pagination, err = query.FilteredPaginate(store, req.Pagination, func(_, value []byte, accumulate bool) (bool, error) {
             var item types.Plan
-            if err := q.cdc.UnmarshalBinaryBare(value, &item); err != nil {
+            if err := q.cdc.Unmarshal(value, &item); err != nil {
                 return false, err
             }
 
@@ -154,7 +154,7 @@ func (q *queryServer) QueryPlansForProvider(c context.Context, req *types.QueryPlansForProviderRequest
         store := prefix.NewStore(q.Store(ctx), types.PlanKeyPrefix)
         pagination, err = query.FilteredPaginate(store, req.Pagination, func(_, value []byte, accumulate bool) (bool, error) {
             var item types.Plan
-            if err := q.cdc.UnmarshalBinaryBare(value, &item); err != nil {
+            if err := q.cdc.Unmarshal(value, &item); err != nil {
                 return false, err
             }
             if !strings.EqualFold(item.Provider, req.Address) {
34 changes: 17 additions & 17 deletions x/plan/simulation/decoder.go
@@ -11,55 +11,55 @@ import (
     "github.com/sentinel-official/hub/x/plan/types"
 )
 
-func NewStoreDecoder(cdc codec.Marshaler) func(kvA, kvB kv.Pair) string {
+func NewStoreDecoder(cdc codec.Codec) func(kvA, kvB kv.Pair) string {
     return func(kvA, kvB kv.Pair) string {
         switch {
         case bytes.Equal(kvA.Key[:1], types.CountKey):
             var countA, countB protobuftypes.UInt64Value
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &countA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &countB)
+            cdc.MustUnmarshal(kvA.Value, &countA)
+            cdc.MustUnmarshal(kvB.Value, &countB)
 
             return fmt.Sprintf("%v\n%v", &countA, &countB)
         case bytes.Equal(kvA.Key[:1], types.PlanKeyPrefix):
             var planA, planB types.Plan
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &planA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &planB)
+            cdc.MustUnmarshal(kvA.Value, &planA)
+            cdc.MustUnmarshal(kvB.Value, &planB)
 
             return fmt.Sprintf("%v\n%v", &planA, &planB)
         case bytes.Equal(kvA.Key[:1], types.ActivePlanKeyPrefix):
             var activePlanA, activePlanB protobuftypes.BoolValue
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &activePlanA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &activePlanB)
+            cdc.MustUnmarshal(kvA.Value, &activePlanA)
+            cdc.MustUnmarshal(kvB.Value, &activePlanB)
 
             return fmt.Sprintf("%v\n%v", &activePlanA, &activePlanB)
         case bytes.Equal(kvA.Key[:1], types.InactivePlanKeyPrefix):
             var inactivePlanA, inactivePlanB protobuftypes.BoolValue
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &inactivePlanA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &inactivePlanB)
+            cdc.MustUnmarshal(kvA.Value, &inactivePlanA)
+            cdc.MustUnmarshal(kvB.Value, &inactivePlanB)
 
             return fmt.Sprintf("%v\n%v", &inactivePlanA, &inactivePlanB)
         case bytes.Equal(kvA.Key[:1], types.ActivePlanForProviderKeyPrefix):
             var activePlanForProviderA, activePlanForProviderB protobuftypes.BoolValue
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &activePlanForProviderA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &activePlanForProviderB)
+            cdc.MustUnmarshal(kvA.Value, &activePlanForProviderA)
+            cdc.MustUnmarshal(kvB.Value, &activePlanForProviderB)
 
             return fmt.Sprintf("%v\n%v", &activePlanForProviderA, &activePlanForProviderB)
         case bytes.Equal(kvA.Key[:1], types.InactivePlanForProviderKeyPrefix):
             var inactivePlanForProviderA, inactivePlanForProviderB protobuftypes.BoolValue
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &inactivePlanForProviderA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &inactivePlanForProviderB)
+            cdc.MustUnmarshal(kvA.Value, &inactivePlanForProviderA)
+            cdc.MustUnmarshal(kvB.Value, &inactivePlanForProviderB)
 
             return fmt.Sprintf("%v\n%v", &inactivePlanForProviderA, &inactivePlanForProviderB)
         case bytes.Equal(kvA.Key[:1], types.NodeForPlanKeyPrefix):
             var nodeForPlanA, nodeForPlanB protobuftypes.BoolValue
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &nodeForPlanA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &nodeForPlanB)
+            cdc.MustUnmarshal(kvA.Value, &nodeForPlanA)
+            cdc.MustUnmarshal(kvB.Value, &nodeForPlanB)
 
             return fmt.Sprintf("%v\n%v", &nodeForPlanA, &nodeForPlanB)
         case bytes.Equal(kvA.Key[:1], types.CountForNodeByProviderKeyPrefix):
             var countForNodeByProviderA, countForNodeByProviderB protobuftypes.UInt64Value
-            cdc.MustUnmarshalBinaryBare(kvA.Value, &countForNodeByProviderA)
-            cdc.MustUnmarshalBinaryBare(kvB.Value, &countForNodeByProviderB)
+            cdc.MustUnmarshal(kvA.Value, &countForNodeByProviderA)
+            cdc.MustUnmarshal(kvB.Value, &countForNodeByProviderB)
 
             return fmt.Sprintf("%v\n%v", &countForNodeByProviderA, &countForNodeByProviderB)
         }
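`NewStoreDecoder` feeds the simulation framework: after a simulated run, mismatching KV pairs are handed to each module's decoder, and the first key byte selects which protobuf type to render. A sketch of where it is typically registered, assuming the usual `RegisterStoreDecoder` hook, a `types.StoreKey` constant, and an `AppModule` carrying the codec (all assumptions; none appear in this diff):

```go
package plan

import (
    "github.com/cosmos/cosmos-sdk/codec"
    sdk "github.com/cosmos/cosmos-sdk/types"

    "github.com/sentinel-official/hub/x/plan/simulation"
    "github.com/sentinel-official/hub/x/plan/types"
)

// Stub for illustration; the real AppModule lives in x/plan/module.go.
type AppModule struct {
    cdc codec.Codec
}

// RegisterStoreDecoder hands the simulation manager one decoder per
// store key; it is called to pretty-print differing KV pairs.
func (a AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {
    sdr[types.StoreKey] = simulation.NewStoreDecoder(a.cdc)
}
```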
10 changes: 5 additions & 5 deletions x/plan/simulation/operations.go
@@ -28,7 +28,7 @@ var (
 
 func WeightedOperations(
     params simulationtypes.AppParams,
-    cdc codec.JSONMarshaler,
+    cdc codec.JSONCodec,
     ak expected.AccountKeeper,
     bk expected.BankKeeper,
     k keeper.Keeper,
@@ -168,7 +168,7 @@ func SimulateMsgAddRequest(ak expected.AccountKeeper, bk expected.BankKeeper, k keeper.Keeper
             return simulationtypes.NoOpMsg(types.ModuleName, types.TypeMsgAddRequest, err.Error()), nil, err
         }
 
-        return simulationtypes.NewOperationMsg(message, true, ""), nil, nil
+        return simulationtypes.NewOperationMsg(message, true, "", nil), nil, nil
     }
 }
 
@@ -237,7 +237,7 @@ func SimulateMsgSetStatusRequest(ak expected.AccountKeeper, bk expected.BankKeeper
             return simulationtypes.NoOpMsg(types.ModuleName, types.TypeMsgSetStatusRequest, err.Error()), nil, err
         }
 
-        return simulationtypes.NewOperationMsg(message, true, ""), nil, nil
+        return simulationtypes.NewOperationMsg(message, true, "", nil), nil, nil
     }
 }
 
@@ -316,7 +316,7 @@ func SimulateMsgAddNodeRequest(ak expected.AccountKeeper, bk expected.BankKeeper
             return simulationtypes.NoOpMsg(types.ModuleName, types.TypeMsgAddNodeRequest, err.Error()), nil, err
         }
 
-        return simulationtypes.NewOperationMsg(message, true, ""), nil, nil
+        return simulationtypes.NewOperationMsg(message, true, "", nil), nil, nil
     }
 }
 
@@ -387,6 +387,6 @@ func SimulateMsgRemoveNodeRequest(ak expected.AccountKeeper, bk expected.BankKeeper
             return simulationtypes.NoOpMsg(types.ModuleName, types.TypeMsgRemoveNodeRequest, err.Error()), nil, err
         }
 
-        return simulationtypes.NewOperationMsg(message, true, ""), nil, nil
+        return simulationtypes.NewOperationMsg(message, true, "", nil), nil, nil
     }
 }
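The trailing `nil` added to each `NewOperationMsg` call tracks a cosmos-sdk v0.43 signature change: the helper gained a fourth `*codec.ProtoCodec` parameter used to marshal the message into the resulting `OperationMsg`; passing nil is (presumably) acceptable for messages that still implement `legacytx.LegacyMsg` and carry their own sign bytes. A small sketch of the call pattern under that assumption, with a hypothetical helper name:

```go
package simulation

import (
    "github.com/cosmos/cosmos-sdk/codec"
    sdk "github.com/cosmos/cosmos-sdk/types"
    simulationtypes "github.com/cosmos/cosmos-sdk/types/simulation"
)

// okMsg is a hypothetical helper mirroring the call sites above: it
// wraps a successfully delivered message in an OperationMsg. cdc may
// be nil when msg implements legacytx.LegacyMsg (assumed v0.43 behavior).
func okMsg(msg sdk.Msg, cdc *codec.ProtoCodec) simulationtypes.OperationMsg {
    return simulationtypes.NewOperationMsg(msg, true, "", cdc)
}
```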