*: reduce some unnecessary parameters #1698

Merged Aug 23, 2019 (4 commits)
Changes from 2 commits
server/core/store.go (4 changes: 2 additions & 2 deletions)
@@ -320,7 +320,7 @@ func (s *StoreInfo) IsLowSpace(lowSpaceRatio float64) bool {
     return s.GetStoreStats() != nil && s.AvailableRatio() < 1-lowSpaceRatio
 }
 
-// ResourceCount reutrns count of leader/region in the store.
+// ResourceCount returns count of leader/region in the store.
 func (s *StoreInfo) ResourceCount(kind ResourceKind) uint64 {
     switch kind {
     case LeaderKind:
@@ -344,7 +344,7 @@ func (s *StoreInfo) ResourceSize(kind ResourceKind) int64 {
     }
 }
 
-// ResourceScore reutrns score of leader/region in the store.
+// ResourceScore returns score of leader/region in the store.
 func (s *StoreInfo) ResourceScore(kind ResourceKind, highSpaceRatio, lowSpaceRatio float64, delta int64) float64 {
     switch kind {
     case LeaderKind:
server/schedulers/balance_leader.go (17 changes: 9 additions & 8 deletions)
@@ -131,13 +131,12 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*operator.Operator {
     l.counter.WithLabelValues("high_score", sourceAddress, sourceStoreLabel).Inc()
     l.counter.WithLabelValues("low_score", targetAddress, targetStoreLabel).Inc()
 
-    opInfluence := l.opController.GetOpInfluence(cluster)
     for i := 0; i < balanceLeaderRetryLimit; i++ {
-        if op := l.transferLeaderOut(source, cluster, opInfluence); op != nil {
+        if op := l.transferLeaderOut(cluster, source); op != nil {
             l.counter.WithLabelValues("transfer_out", sourceAddress, sourceStoreLabel).Inc()
             return op
         }
-        if op := l.transferLeaderIn(target, cluster, opInfluence); op != nil {
+        if op := l.transferLeaderIn(cluster, target); op != nil {
             l.counter.WithLabelValues("transfer_in", targetAddress, targetStoreLabel).Inc()
             return op
         }
@@ -155,7 +154,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*operator.Operator {
 // transferLeaderOut transfers leader from the source store.
 // It randomly selects a health region from the source store, then picks
 // the best follower peer and transfers the leader.
-func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) transferLeaderOut(cluster schedule.Cluster, source *core.StoreInfo) []*operator.Operator {
     sourceID := source.GetID()
     region := cluster.RandLeaderRegion(sourceID, core.HealthRegion())
     if region == nil {
@@ -169,13 +168,13 @@ func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
         schedulerCounter.WithLabelValues(l.GetName(), "no_target_store").Inc()
         return nil
     }
-    return l.createOperator(region, source, target, cluster, opInfluence)
+    return l.createOperator(cluster, region, source, target)
 }
 
 // transferLeaderIn transfers leader to the target store.
 // It randomly selects a health region from the target store, then picks
 // the worst follower peer and transfers the leader.
-func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) transferLeaderIn(cluster schedule.Cluster, target *core.StoreInfo) []*operator.Operator {
     targetID := target.GetID()
     region := cluster.RandFollowerRegion(targetID, core.HealthRegion())
     if region == nil {
@@ -194,14 +193,14 @@ func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
         schedulerCounter.WithLabelValues(l.GetName(), "no_leader").Inc()
         return nil
     }
-    return l.createOperator(region, source, target, cluster, opInfluence)
+    return l.createOperator(cluster, region, source, target)
 }
 
 // createOperator creates the operator according to the source and target store.
 // If the region is hot or the difference between the two stores is tolerable, then
 // no new operator need to be created, otherwise create an operator that transfers
 // the leader from the source store to the target store for the region.
-func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
+func (l *balanceLeaderScheduler) createOperator(cluster schedule.Cluster, region *core.RegionInfo, source, target *core.StoreInfo) []*operator.Operator {
     if cluster.IsRegionHot(region) {
         log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()))
         schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc()
@@ -210,6 +209,8 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
 
     sourceID := source.GetID()
     targetID := target.GetID()
+
+    opInfluence := l.opController.GetOpInfluence(cluster)
     if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) {
         log.Debug("skip balance leader",
             zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
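A note on the pattern in this file: opInfluence used to be computed once in Schedule and threaded through transferLeaderOut/transferLeaderIn into createOperator; after this change, each helper derives it from the controller at the single point where it is consumed. Below is a minimal sketch of the before/after call shape, assuming simplified stand-in types (Cluster, Influence, and GetOpInfluence here are illustrative, not PD's actual API):

// Sketch of the parameter-reduction pattern, not PD's real code.
package main

import "fmt"

// Influence stands in for operator.OpInfluence.
type Influence struct{ LeaderCount int }

// Cluster stands in for schedule.Cluster plus the operator controller.
type Cluster struct{ pendingLeaders int }

// GetOpInfluence plays the role of opController.GetOpInfluence(cluster).
func (c *Cluster) GetOpInfluence() Influence {
	return Influence{LeaderCount: c.pendingLeaders}
}

// Before: every helper in the call chain carried the extra parameter.
func createOperatorBefore(c *Cluster, inf Influence) string {
	return fmt.Sprintf("operator built with influence %d", inf.LeaderCount)
}

// After: one parameter fewer at each call site; the helper asks the
// cluster handle it already has, where the value is actually consumed.
func createOperatorAfter(c *Cluster) string {
	inf := c.GetOpInfluence()
	return fmt.Sprintf("operator built with influence %d", inf.LeaderCount)
}

func main() {
	c := &Cluster{pendingLeaders: 3}
	fmt.Println(createOperatorBefore(c, c.GetOpInfluence()))
	fmt.Println(createOperatorAfter(c))
}

The win is purely in the signatures: callers no longer need to know that the helpers consume influence at all.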
server/schedulers/balance_region.go (6 changes: 3 additions & 3 deletions)
@@ -132,7 +132,6 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.Operator {
     sourceLabel := strconv.FormatUint(sourceID, 10)
     s.counter.WithLabelValues("source_store", sourceAddress, sourceLabel).Inc()
 
-    opInfluence := s.opController.GetOpInfluence(cluster)
     for i := 0; i < balanceRegionRetryLimit; i++ {
         // Priority picks the region that has a pending peer.
         // Pending region may means the disk is overload, remove the pending region firstly.
@@ -169,7 +168,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.Operator {
         }
 
         oldPeer := region.GetStorePeer(sourceID)
-        if op := s.transferPeer(cluster, region, oldPeer, opInfluence); op != nil {
+        if op := s.transferPeer(cluster, region, oldPeer); op != nil {
             schedulerCounter.WithLabelValues(s.GetName(), "new_operator").Inc()
             return []*operator.Operator{op}
         }
@@ -178,7 +177,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.Operator {
 }
 
 // transferPeer selects the best store to create a new peer to replace the old peer.
-func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer, opInfluence operator.OpInfluence) *operator.Operator {
+func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer) *operator.Operator {
     // scoreGuard guarantees that the distinct score will not decrease.
     stores := cluster.GetRegionStores(region)
     sourceStoreID := oldPeer.GetStoreId()
@@ -205,6 +204,7 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *core.RegionInfo, oldPeer *metapb.Peer, opInfluence operator.OpInfluence) *operator.Operator {
     targetID := target.GetID()
     log.Debug("", zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID))
 
+    opInfluence := s.opController.GetOpInfluence(cluster)
     if !shouldBalance(cluster, source, target, region, core.RegionKind, opInfluence) {
         log.Debug("skip balance region",
             zap.String("scheduler", s.GetName()), zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
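balance_region.go gets the same refactor, with one nuance visible in the diff: GetOpInfluence was previously snapshotted once before the retry loop in Schedule, whereas it is now re-evaluated inside transferPeer on every attempt, so later retries can observe influence from operators admitted in the meantime. A minimal sketch of that difference, with an illustrative counter standing in for the operator controller's state (none of these names are PD's API):

package main

import "fmt"

// pending stands in for controller state that other goroutines may
// change between retry attempts.
var pending int

func getOpInfluence() int { return pending }

func main() {
	const retryLimit = 3 // plays the role of balanceRegionRetryLimit

	// Before the refactor: influence snapshotted once, reused on every retry.
	snapshot := getOpInfluence()
	for i := 0; i < retryLimit; i++ {
		fmt.Println("before: attempt", i, "sees influence", snapshot)
		pending++ // simulate another operator being admitted concurrently
	}

	pending = 0
	// After the refactor: each attempt re-reads influence inside the helper,
	// so it reflects operators admitted since the previous attempt.
	for i := 0; i < retryLimit; i++ {
		fmt.Println("after: attempt", i, "sees influence", getOpInfluence())
		pending++
	}
}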