From c1ffe308890fdf5c85869594f8b508c50605d4d0 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Wed, 21 Aug 2019 17:11:50 +0800
Subject: [PATCH] reduce some unnecessary parameters

Signed-off-by: Ryan Leung
---
 server/core/store.go                |  4 ++--
 server/schedulers/balance_leader.go | 17 +++++++++--------
 server/schedulers/balance_region.go |  6 +++---
 3 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/server/core/store.go b/server/core/store.go
index 851b3e391bb..881ebbbb34c 100644
--- a/server/core/store.go
+++ b/server/core/store.go
@@ -320,7 +320,7 @@ func (s *StoreInfo) IsLowSpace(lowSpaceRatio float64) bool {
 	return s.GetStoreStats() != nil && s.AvailableRatio() < 1-lowSpaceRatio
 }
 
-// ResourceCount reutrns count of leader/region in the store.
+// ResourceCount returns count of leader/region in the store.
 func (s *StoreInfo) ResourceCount(kind ResourceKind) uint64 {
 	switch kind {
 	case LeaderKind:
@@ -344,7 +344,7 @@ func (s *StoreInfo) ResourceSize(kind ResourceKind) int64 {
 	}
 }
 
-// ResourceScore reutrns score of leader/region in the store.
+// ResourceScore returns score of leader/region in the store.
 func (s *StoreInfo) ResourceScore(kind ResourceKind, highSpaceRatio, lowSpaceRatio float64, delta int64) float64 {
 	switch kind {
 	case LeaderKind:
diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go
index ca8b4205679..1e439c5115a 100644
--- a/server/schedulers/balance_leader.go
+++ b/server/schedulers/balance_leader.go
@@ -131,13 +131,12 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*operator.
 	l.counter.WithLabelValues("high_score", sourceAddress, sourceStoreLabel).Inc()
 	l.counter.WithLabelValues("low_score", targetAddress, targetStoreLabel).Inc()
 
-	opInfluence := l.opController.GetOpInfluence(cluster)
 	for i := 0; i < balanceLeaderRetryLimit; i++ {
-		if op := l.transferLeaderOut(source, cluster, opInfluence); op != nil {
+		if op := l.transferLeaderOut(cluster, source); op != nil {
 			l.counter.WithLabelValues("transfer_out", sourceAddress, sourceStoreLabel).Inc()
 			return op
 		}
-		if op := l.transferLeaderIn(target, cluster, opInfluence); op != nil {
+		if op := l.transferLeaderIn(cluster, target); op != nil {
 			l.counter.WithLabelValues("transfer_in", targetAddress, targetStoreLabel).Inc()
 			return op
 		}
@@ -155,7 +154,7 @@
 // transferLeaderOut transfers leader from the source store.
 // It randomly selects a health region from the source store, then picks
 // the best follower peer and transfers the leader.
-func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) transferLeaderOut(cluster schedule.Cluster, source *core.StoreInfo) []*operator.Operator {
 	sourceID := source.GetID()
 	region := cluster.RandLeaderRegion(sourceID, core.HealthRegion())
 	if region == nil {
@@ -169,13 +168,13 @@
 		schedulerCounter.WithLabelValues(l.GetName(), "no_target_store").Inc()
 		return nil
 	}
-	return l.createOperator(region, source, target, cluster, opInfluence)
+	return l.createOperator(cluster, region, source, target)
 }
 
 // transferLeaderIn transfers leader to the target store.
 // It randomly selects a health region from the target store, then picks
 // the worst follower peer and transfers the leader.
-func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) transferLeaderIn(cluster schedule.Cluster, target *core.StoreInfo) []*operator.Operator {
 	targetID := target.GetID()
 	region := cluster.RandFollowerRegion(targetID, core.HealthRegion())
 	if region == nil {
@@ -194,14 +193,14 @@
 		schedulerCounter.WithLabelValues(l.GetName(), "no_leader").Inc()
 		return nil
 	}
-	return l.createOperator(region, source, target, cluster, opInfluence)
+	return l.createOperator(cluster, region, source, target)
 }
 
 // createOperator creates the operator according to the source and target store.
 // If the region is hot or the difference between the two stores is tolerable, then
 // no new operator need to be created, otherwise create an operator that transfers
 // the leader from the source store to the target store for the region.
-func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) createOperator(cluster schedule.Cluster, region *core.RegionInfo, source, target *core.StoreInfo) []*operator.Operator {
 	if cluster.IsRegionHot(region) {
 		log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()))
 		schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc()
@@ -210,6 +209,8 @@
 
 	sourceID := source.GetID()
 	targetID := target.GetID()
+
+	opInfluence := l.opController.GetOpInfluence(cluster)
 	if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) {
 		log.Debug("skip balance leader", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index 92ea895a0c3..faf19dec858 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -132,7 +132,6 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
 	sourceLabel := strconv.FormatUint(sourceID, 10)
 	s.counter.WithLabelValues("source_store", sourceAddress, sourceLabel).Inc()
 
-	opInfluence := s.opController.GetOpInfluence(cluster)
 	for i := 0; i < balanceRegionRetryLimit; i++ {
 		// Priority picks the region that has a pending peer.
 		// Pending region may means the disk is overload, remove the pending region firstly.
@@ -169,7 +168,7 @@
 		}
 
 		oldPeer := region.GetStorePeer(sourceID)
-		if op := s.transferPeer(cluster, region, oldPeer, opInfluence); op != nil {
+		if op := s.transferPeer(cluster, region, oldPeer); op != nil {
 			schedulerCounter.WithLabelValues(s.GetName(), "new_operator").Inc()
 			return []*operator.Operator{op}
 		}
@@ -178,7 +177,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
 }
 
 // transferPeer selects the best store to create a new peer to replace the old peer.
-func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer, opInfluence operator.OpInfluence) *operator.Operator {
+func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer) *operator.Operator {
 	// scoreGuard guarantees that the distinct score will not decrease.
 	stores := cluster.GetRegionStores(region)
 	sourceStoreID := oldPeer.GetStoreId()
@@ -205,6 +204,7 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 	targetID := target.GetID()
 	log.Debug("", zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID))
+	opInfluence := s.opController.GetOpInfluence(cluster)
 	if !shouldBalance(cluster, source, target, region, core.RegionKind, opInfluence) {
 		log.Debug("skip balance region", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
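
The shape of the change: instead of computing operator.OpInfluence once in Schedule and threading it through transferLeaderOut/transferLeaderIn/transferPeer down to the shouldBalance check, the function that actually consumes it now asks the operator controller for it at the point of use. A minimal, self-contained Go sketch of that pattern is below; the Influence, controller, and scheduler types are simplified stand-ins for illustration, not the PD types.

    package main

    import "fmt"

    // Influence is a simplified stand-in for operator.OpInfluence: the load that
    // in-flight operators are expected to add to or remove from each store.
    type Influence map[uint64]int64

    // controller is a stand-in for the operator controller the schedulers hold.
    type controller struct{}

    // GetInfluence plays the role of opController.GetOpInfluence(cluster): it can
    // be called wherever the value is actually consumed.
    func (c *controller) GetInfluence() Influence {
        return Influence{1: 10, 2: -10}
    }

    // scheduler is a stand-in for balanceLeaderScheduler / balanceRegionScheduler.
    type scheduler struct {
        ctl *controller
    }

    // createOperator is the one consumer of the influence, so after the refactor it
    // fetches the value itself instead of receiving it through every caller.
    func (s *scheduler) createOperator(sourceID, targetID uint64) {
        inf := s.ctl.GetInfluence() // fetched at the point of use
        fmt.Printf("balance store %d -> store %d, influence %v\n", sourceID, targetID, inf)
    }

    func main() {
        s := &scheduler{ctl: &controller{}}
        // Before the patch: inf := s.ctl.GetInfluence(); s.createOperator(1, 2, inf)
        // After the patch the intermediate parameter disappears:
        s.createOperator(1, 2)
    }

One consequence worth noting: GetOpInfluence now runs inside the retry loop (once per shouldBalance check) rather than once per Schedule call; the sketch assumes that call is cheap enough for this to be an acceptable trade for the simpler signatures.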